// Copyright 2017 The TensorFlow Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// DO NOT EDIT
// This file was machine generated by github.com/tensorflow/tensorflow/tensorflow/go/genop/internal
//
// WARNING: This generation of wrapper functions for TensorFlow ops is in an
// experimental state. The generated API can change without notice.

package op

import tf "github.com/tensorflow/tensorflow/tensorflow/go"

// optionalAttr is an intentionally unexported type that hides the
// details of how optional attributes to operations are implemented.
type optionalAttr map[string]interface{}

func makeOutputList(op *tf.Operation, start int, output string) ([]tf.Output, int, error) {
	size, err := op.OutputListSize(output)
	if err != nil {
		return nil, start, err
	}
	list := make([]tf.Output, size)
	for i := 0; i < size; i++ {
		list[i] = op.Output(start + i)
	}
	return list, start + size, nil
}

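// Every exported wrapper below follows the same functional-options pattern:
// optional attributes are closures that each write one entry into an
// optionalAttr map before it becomes tf.OpSpec.Attrs. applyOptions is a
// minimal sketch of that mechanism (illustrative only, not part of the
// generated API):
func applyOptions(optional ...func(optionalAttr)) optionalAttr {
	attrs := optionalAttr{}
	for _, a := range optional {
		a(attrs)
	}
	return attrs
}
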
// An op used by XLA SPMD partitioner to switch from manual partitioning to
//
// automatic partitioning. It converts the shard-shaped, manually partitioned
// input into a full-shaped tensor to be partitioned automatically with the
// same sharding used by manual partitioning.
func XlaSpmdShardToFullShape(scope *Scope, input tf.Output, manual_sharding string, full_shape tf.Shape) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"manual_sharding": manual_sharding, "full_shape": full_shape}
	opspec := tf.OpSpec{
		Type: "XlaSpmdShardToFullShape",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Wraps the XLA Sort operator, documented at
//
//  https://www.tensorflow.org/performance/xla/operation_semantics#sort
// .
//
// Sorts a tensor. Currently only sorts in ascending order are supported.
//
// Arguments:
//	input: A `Tensor` of type T.
//
// Returns A `Tensor` of type T.
func XlaSort(scope *Scope, input tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "XlaSort",
		Input: []tf.Input{
			input,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

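// exampleXlaSort is a minimal sketch (illustrative only, assuming NewScope
// and Const from this package) of wiring XlaSort into a graph: it sorts a
// rank-1 constant into ascending order.
func exampleXlaSort() tf.Output {
	s := NewScope()
	return XlaSort(s, Const(s, []float32{3, 1, 2}))
}
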
// Receives the named tensor from another XLA computation. Wraps the XLA Recv
//
// operator documented at
//  https://www.tensorflow.org/performance/xla/operation_semantics#recv .
//
// Arguments:
//	dtype: The type of the tensor.
//	tensor_name: A string key that identifies the channel.
//	shape: The shape of the tensor.
//
// Returns The tensor to receive.
func XlaRecv(scope *Scope, dtype tf.DataType, tensor_name string, shape tf.Shape) (tensor tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"dtype": dtype, "tensor_name": tensor_name, "shape": shape}
	opspec := tf.OpSpec{
		Type: "XlaRecv",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Wraps the XLA DynamicSlice operator, documented at
//
//  https://www.tensorflow.org/performance/xla/operation_semantics#dynamicslice
// .
//
// DynamicSlice extracts a sub-array from the input array at dynamic
// start_indices. The size of the slice in each dimension is passed in
// size_indices, which specify the end point of exclusive slice intervals in each
// dimension -- [start, start + size). The shape of start_indices must have rank 1,
// with dimension size equal to the rank of operand.
//
// Arguments:
//	input: A `Tensor` of type T.
//	start_indices: Rank-1 tensor of N integers containing the starting index of
// the slice for each dimension.
//	size_indices: List of N integers containing the slice size for each
// dimension. Each value must be strictly greater than zero, and start + size
// must be less than or equal to the size of the dimension to avoid
// implementation-defined behavior.
//
func XlaDynamicSlice(scope *Scope, input tf.Output, start_indices tf.Output, size_indices tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "XlaDynamicSlice",
		Input: []tf.Input{
			input, start_indices, size_indices,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

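// exampleXlaDynamicSlice is a sketch (illustrative only, assuming NewScope
// and Const from this package) of a dynamic slice: starting at index 1 with
// size 2, it selects the interval [1, 3) of a rank-1 constant.
func exampleXlaDynamicSlice() tf.Output {
	s := NewScope()
	input := Const(s, []int32{10, 20, 30, 40})
	start := Const(s, []int32{1})
	size := Const(s, []int32{2})
	return XlaDynamicSlice(s, input, start, size)
}
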
// Set a bound for the given input value as a hint to the XLA compiler,
//
// returns the same value.
func XlaSetBound(scope *Scope, input tf.Output, bound tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "XlaSetBound",
		Input: []tf.Input{
			input, bound,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Wraps the XLA DotGeneral operator, documented at
//
//  https://www.tensorflow.org/performance/xla/operation_semantics#dotgeneral
// .
//
// Arguments:
//	lhs: the LHS tensor
//	rhs: the RHS tensor
//	dimension_numbers: a serialized xla::DotDimensionNumbers proto.
//	precision_config: a serialized xla::PrecisionConfig proto.
func XlaDot(scope *Scope, lhs tf.Output, rhs tf.Output, dimension_numbers string, precision_config string) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"dimension_numbers": dimension_numbers, "precision_config": precision_config}
	opspec := tf.OpSpec{
		Type: "XlaDot",
		Input: []tf.Input{
			lhs, rhs,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Output a fact about factorials.
func Fact(scope *Scope) (fact tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Fact",
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// FakeQuantWithMinMaxVarsGradientAttr is an optional argument to FakeQuantWithMinMaxVarsGradient.
type FakeQuantWithMinMaxVarsGradientAttr func(optionalAttr)

// FakeQuantWithMinMaxVarsGradientNumBits sets the optional num_bits attribute to value.
//
// value: The bitwidth of the quantization; between 2 and 8, inclusive.
// If not specified, defaults to 8
func FakeQuantWithMinMaxVarsGradientNumBits(value int64) FakeQuantWithMinMaxVarsGradientAttr {
	return func(m optionalAttr) {
		m["num_bits"] = value
	}
}

// FakeQuantWithMinMaxVarsGradientNarrowRange sets the optional narrow_range attribute to value.
//
// value: Whether to quantize into 2^num_bits - 1 distinct values.
// If not specified, defaults to false
func FakeQuantWithMinMaxVarsGradientNarrowRange(value bool) FakeQuantWithMinMaxVarsGradientAttr {
	return func(m optionalAttr) {
		m["narrow_range"] = value
	}
}

// Compute gradients for a FakeQuantWithMinMaxVars operation.
//
// Arguments:
//	gradients: Backpropagated gradients above the FakeQuantWithMinMaxVars operation.
//	inputs: Values passed as inputs to the FakeQuantWithMinMaxVars operation.
//	min: Lower bound of the quantization interval; scalar float.
//	max: Upper bound of the quantization interval; scalar float.
//
// Returns:
//	backprops_wrt_input: Backpropagated gradients w.r.t. inputs:
// `gradients * (inputs >= min && inputs <= max)`.
//	backprop_wrt_min: Backpropagated gradients w.r.t. min parameter:
// `sum(gradients * (inputs < min))`.
//	backprop_wrt_max: Backpropagated gradients w.r.t. max parameter:
// `sum(gradients * (inputs > max))`.
func FakeQuantWithMinMaxVarsGradient(scope *Scope, gradients tf.Output, inputs tf.Output, min tf.Output, max tf.Output, optional ...FakeQuantWithMinMaxVarsGradientAttr) (backprops_wrt_input tf.Output, backprop_wrt_min tf.Output, backprop_wrt_max tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "FakeQuantWithMinMaxVarsGradient",
		Input: []tf.Input{
			gradients, inputs, min, max,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}

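// exampleFakeQuantGradient is a sketch (illustrative only) of how the
// optional attributes defined above are supplied: each option is a closure
// applied to the attrs map inside FakeQuantWithMinMaxVarsGradient.
func exampleFakeQuantGradient(s *Scope, grads, inputs, min, max tf.Output) (tf.Output, tf.Output, tf.Output) {
	return FakeQuantWithMinMaxVarsGradient(s, grads, inputs, min, max,
		FakeQuantWithMinMaxVarsGradientNumBits(8),
		FakeQuantWithMinMaxVarsGradientNarrowRange(true))
}
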
// Computes the singular value decomposition of a batch of matrices
//
// (Note: Only real inputs are supported).
//
// Computes the singular values and singular vectors of the innermost M-by-N matrices in
// tensor such that tensor[...,:,:] = u[..., :, :] * Diag(s[..., :]) * Transpose(v[...,:,:]).
//
// Arguments:
//	a: the input tensor.
//	max_iter: maximum number of sweep updates, i.e., the whole lower triangular
// part or upper triangular part based on parameter lower. Heuristically, it has
// been argued that approximately log(min (M, N)) sweeps are needed in practice
// (Ref: Golub & van Loan, "Matrix Computations").
//	epsilon: the tolerance ratio.
//	precision_config: a serialized xla::PrecisionConfig proto.
//
// Returns:
//	s: Singular values. The values are sorted in descending order of magnitude, so
// s[..., 0] is the largest value, s[..., 1] is the second largest, etc.
//	u: Left singular vectors.
//	v: Right singular vectors.
func XlaSvd(scope *Scope, a tf.Output, max_iter int64, epsilon float32, precision_config string) (s tf.Output, u tf.Output, v tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"max_iter": max_iter, "epsilon": epsilon, "precision_config": precision_config}
	opspec := tf.OpSpec{
		Type: "XlaSvd",
		Input: []tf.Input{
			a,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}

// FakeQuantWithMinMaxArgsGradientAttr is an optional argument to FakeQuantWithMinMaxArgsGradient.
type FakeQuantWithMinMaxArgsGradientAttr func(optionalAttr)

// FakeQuantWithMinMaxArgsGradientMin sets the optional min attribute to value.
// If not specified, defaults to -6
func FakeQuantWithMinMaxArgsGradientMin(value float32) FakeQuantWithMinMaxArgsGradientAttr {
	return func(m optionalAttr) {
		m["min"] = value
	}
}

// FakeQuantWithMinMaxArgsGradientMax sets the optional max attribute to value.
// If not specified, defaults to 6
func FakeQuantWithMinMaxArgsGradientMax(value float32) FakeQuantWithMinMaxArgsGradientAttr {
	return func(m optionalAttr) {
		m["max"] = value
	}
}

// FakeQuantWithMinMaxArgsGradientNumBits sets the optional num_bits attribute to value.
// If not specified, defaults to 8
func FakeQuantWithMinMaxArgsGradientNumBits(value int64) FakeQuantWithMinMaxArgsGradientAttr {
	return func(m optionalAttr) {
		m["num_bits"] = value
	}
}

// FakeQuantWithMinMaxArgsGradientNarrowRange sets the optional narrow_range attribute to value.
// If not specified, defaults to false
func FakeQuantWithMinMaxArgsGradientNarrowRange(value bool) FakeQuantWithMinMaxArgsGradientAttr {
	return func(m optionalAttr) {
		m["narrow_range"] = value
	}
}

// Compute gradients for a FakeQuantWithMinMaxArgs operation.
//
// Arguments:
//	gradients: Backpropagated gradients above the FakeQuantWithMinMaxArgs operation.
//	inputs: Values passed as inputs to the FakeQuantWithMinMaxArgs operation.
//
// Returns Backpropagated gradients below the FakeQuantWithMinMaxArgs operation:
// `gradients * (inputs >= min && inputs <= max)`.
func FakeQuantWithMinMaxArgsGradient(scope *Scope, gradients tf.Output, inputs tf.Output, optional ...FakeQuantWithMinMaxArgsGradientAttr) (backprops tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "FakeQuantWithMinMaxArgsGradient",
		Input: []tf.Input{
			gradients, inputs,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Helper operator for performing XLA-style broadcasts
//
// Broadcasts `lhs` and `rhs` to the same rank, by adding size 1 dimensions to
// whichever of `lhs` and `rhs` has the lower rank, using XLA's broadcasting rules
// for binary operators.
//
// Arguments:
//	lhs: the LHS input tensor
//	rhs: the RHS input tensor
//	broadcast_dims: an XLA-style broadcast dimension specification
//
// Returns:
//	lhs_output: the broadcasted LHS tensor
//	rhs_output: the broadcasted RHS tensor
func XlaBroadcastHelper(scope *Scope, lhs tf.Output, rhs tf.Output, broadcast_dims tf.Output) (lhs_output tf.Output, rhs_output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "XlaBroadcastHelper",
		Input: []tf.Input{
			lhs, rhs, broadcast_dims,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1)
}

// Subtracts sparse `updates` from an existing tensor according to `indices`.
//
// This operation creates a new tensor by subtracting sparse `updates` from the
// passed in `tensor`.
// This operation is very similar to `tf.scatter_nd_sub`, except that the updates
// are subtracted from an existing tensor (as opposed to a variable). If the memory
// for the existing tensor cannot be re-used, a copy is made and updated.
//
// `indices` is an integer tensor containing indices into a new tensor of shape
// `shape`.  The last dimension of `indices` can be at most the rank of `shape`:
//
//     indices.shape[-1] <= shape.rank
//
// The last dimension of `indices` corresponds to indices into elements
// (if `indices.shape[-1] = shape.rank`) or slices
// (if `indices.shape[-1] < shape.rank`) along dimension `indices.shape[-1]` of
// `shape`.  `updates` is a tensor with shape
//
//     indices.shape[:-1] + shape[indices.shape[-1]:]
//
// The simplest form of tensor_scatter_sub is to subtract individual elements
// from a tensor by index. For example, say we want to insert 4 scattered elements
// in a rank-1 tensor with 8 elements.
//
// In Python, this scatter subtract operation would look like this:
//
// ```python
//     indices = tf.constant([[4], [3], [1], [7]])
//     updates = tf.constant([9, 10, 11, 12])
//     tensor = tf.ones([8], dtype=tf.int32)
//     updated = tf.tensor_scatter_nd_sub(tensor, indices, updates)
//     print(updated)
// ```
//
// The resulting tensor would look like this:
//
//     [1, -10, 1, -9, -8, 1, 1, -11]
//
// We can also insert entire slices of a higher rank tensor all at once. For
// example, we can insert two slices in the first dimension of a
// rank-3 tensor with two matrices of new values.
//
// In Python, this scatter subtract operation would look like this:
//
// ```python
//     indices = tf.constant([[0], [2]])
//     updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6],
//                             [7, 7, 7, 7], [8, 8, 8, 8]],
//                            [[5, 5, 5, 5], [6, 6, 6, 6],
//                             [7, 7, 7, 7], [8, 8, 8, 8]]])
//     tensor = tf.ones([4, 4, 4], dtype=tf.int32)
//     updated = tf.tensor_scatter_nd_sub(tensor, indices, updates)
//     print(updated)
// ```
//
// The resulting tensor would look like this:
//
//     [[[-4, -4, -4, -4], [-5, -5, -5, -5], [-6, -6, -6, -6], [-7, -7, -7, -7]],
//      [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]],
//      [[-4, -4, -4, -4], [-5, -5, -5, -5], [-6, -6, -6, -6], [-7, -7, -7, -7]],
//      [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]]
//
// Note that on CPU, if an out of bound index is found, an error is returned.
// On GPU, if an out of bound index is found, the index is ignored.
//
// Arguments:
//	tensor: Tensor to copy/update.
//	indices: Index tensor.
//	updates: Updates to scatter into output.
//
// Returns A new tensor copied from tensor and updates subtracted according to the indices.
func TensorScatterSub(scope *Scope, tensor tf.Output, indices tf.Output, updates tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "TensorScatterSub",
		Input: []tf.Input{
			tensor, indices, updates,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

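// exampleTensorScatterSub mirrors the rank-1 Python example above in Go
// (a sketch, assuming NewScope and Const from this package): subtracting
// updates [9, 10, 11, 12] at indices 4, 3, 1 and 7 of an all-ones tensor
// yields [1, -10, 1, -9, -8, 1, 1, -11].
func exampleTensorScatterSub() tf.Output {
	s := NewScope()
	indices := Const(s, [][]int32{{4}, {3}, {1}, {7}})
	updates := Const(s, []int32{9, 10, 11, 12})
	tensor := Const(s, []int32{1, 1, 1, 1, 1, 1, 1, 1})
	return TensorScatterSub(s, tensor, indices, updates)
}
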
// Adds sparse `updates` to an existing tensor according to `indices`.
//
// This operation creates a new tensor by adding sparse `updates` to the passed
// in `tensor`.
// This operation is very similar to `tf.scatter_nd_add`, except that the updates
// are added onto an existing tensor (as opposed to a variable). If the memory
// for the existing tensor cannot be re-used, a copy is made and updated.
//
// `indices` is an integer tensor containing indices into a new tensor of shape
// `tensor.shape`.  The last dimension of `indices` can be at most the rank of
// `tensor.shape`:
//
//     indices.shape[-1] <= tensor.shape.rank
//
// The last dimension of `indices` corresponds to indices into elements
// (if `indices.shape[-1] = tensor.shape.rank`) or slices
// (if `indices.shape[-1] < tensor.shape.rank`) along dimension
// `indices.shape[-1]` of `tensor.shape`.  `updates` is a tensor with shape
//
//     indices.shape[:-1] + tensor.shape[indices.shape[-1]:]
//
// The simplest form of tensor_scatter_add is to add individual elements to a
// tensor by index. For example, say we want to add 4 elements in a rank-1
// tensor with 8 elements.
//
// In Python, this scatter add operation would look like this:
//
// ```python
//     indices = tf.constant([[4], [3], [1], [7]])
//     updates = tf.constant([9, 10, 11, 12])
//     tensor = tf.ones([8], dtype=tf.int32)
//     updated = tf.tensor_scatter_nd_add(tensor, indices, updates)
//     print(updated)
// ```
//
// The resulting tensor would look like this:
//
//     [1, 12, 1, 11, 10, 1, 1, 13]
//
// We can also insert entire slices of a higher rank tensor all at once. For
// example, we can insert two slices in the first dimension of a
// rank-3 tensor with two matrices of new values.
//
// In Python, this scatter add operation would look like this:
//
// ```python
//     indices = tf.constant([[0], [2]])
//     updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6],
//                             [7, 7, 7, 7], [8, 8, 8, 8]],
//                            [[5, 5, 5, 5], [6, 6, 6, 6],
//                             [7, 7, 7, 7], [8, 8, 8, 8]]])
//     tensor = tf.ones([4, 4, 4], dtype=tf.int32)
//     updated = tf.tensor_scatter_nd_add(tensor, indices, updates)
//     print(updated)
// ```
//
// The resulting tensor would look like this:
//
//     [[[6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8], [9, 9, 9, 9]],
//      [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]],
//      [[6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8], [9, 9, 9, 9]],
//      [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]]
//
// Note that on CPU, if an out of bound index is found, an error is returned.
// On GPU, if an out of bound index is found, the index is ignored.
//
// Arguments:
//	tensor: Tensor to copy/update.
//	indices: Index tensor.
//	updates: Updates to scatter into output.
//
// Returns A new tensor copied from tensor and updates added according to the indices.
func TensorScatterAdd(scope *Scope, tensor tf.Output, indices tf.Output, updates tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "TensorScatterAdd",
		Input: []tf.Input{
			tensor, indices, updates,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

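// runTensorScatterAdd is a sketch of executing TensorScatterAdd end to end
// with the core bindings from package tf: finalize the scope into a graph,
// run a session, and read back [1, 12, 1, 11, 10, 1, 1, 13] as in the
// rank-1 example above.
func runTensorScatterAdd() ([]int32, error) {
	s := NewScope()
	indices := Const(s, [][]int32{{4}, {3}, {1}, {7}})
	updates := Const(s, []int32{9, 10, 11, 12})
	tensor := Const(s, []int32{1, 1, 1, 1, 1, 1, 1, 1})
	out := TensorScatterAdd(s, tensor, indices, updates)
	graph, err := s.Finalize()
	if err != nil {
		return nil, err
	}
	sess, err := tf.NewSession(graph, nil)
	if err != nil {
		return nil, err
	}
	defer sess.Close()
	res, err := sess.Run(nil, []tf.Output{out}, nil)
	if err != nil {
		return nil, err
	}
	return res[0].Value().([]int32), nil
}
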
// Reshapes a quantized tensor as per the Reshape op.
//
// Arguments:
//	tensor: A quantized tensor to reshape.
//	shape: Defines the shape of the output tensor.
//	input_min: The minimum value of the input.
//	input_max: The maximum value of the input.
//
// Returns:
//	output: The reshaped tensor.
//	output_min: This value is copied from input_min.
//	output_max: This value is copied from input_max.
func QuantizedReshape(scope *Scope, tensor tf.Output, shape tf.Output, input_min tf.Output, input_max tf.Output) (output tf.Output, output_min tf.Output, output_max tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "QuantizedReshape",
		Input: []tf.Input{
			tensor, shape, input_min, input_max,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}

// QuantizeAndDequantizeV4GradAttr is an optional argument to QuantizeAndDequantizeV4Grad.
type QuantizeAndDequantizeV4GradAttr func(optionalAttr)

// QuantizeAndDequantizeV4GradAxis sets the optional axis attribute to value.
// If not specified, defaults to -1
func QuantizeAndDequantizeV4GradAxis(value int64) QuantizeAndDequantizeV4GradAttr {
	return func(m optionalAttr) {
		m["axis"] = value
	}
}

// Returns the gradient of `QuantizeAndDequantizeV4`.
//
// Returns a gradient of 1 for inputs that are within the quantization range,
// or 0 otherwise.
func QuantizeAndDequantizeV4Grad(scope *Scope, gradients tf.Output, input tf.Output, input_min tf.Output, input_max tf.Output, optional ...QuantizeAndDequantizeV4GradAttr) (input_backprop tf.Output, input_min_backprop tf.Output, input_max_backprop tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "QuantizeAndDequantizeV4Grad",
		Input: []tf.Input{
			gradients, input, input_min, input_max,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}

// QuantizeAndDequantizeV2Attr is an optional argument to QuantizeAndDequantizeV2.
type QuantizeAndDequantizeV2Attr func(optionalAttr)

// QuantizeAndDequantizeV2SignedInput sets the optional signed_input attribute to value.
//
// value: Whether the quantization is signed or unsigned. (actually this parameter should
// have been called `signed_output`)
// If not specified, defaults to true
func QuantizeAndDequantizeV2SignedInput(value bool) QuantizeAndDequantizeV2Attr {
	return func(m optionalAttr) {
		m["signed_input"] = value
	}
}

// QuantizeAndDequantizeV2NumBits sets the optional num_bits attribute to value.
//
// value: The bitwidth of the quantization.
// If not specified, defaults to 8
func QuantizeAndDequantizeV2NumBits(value int64) QuantizeAndDequantizeV2Attr {
	return func(m optionalAttr) {
		m["num_bits"] = value
	}
}

// QuantizeAndDequantizeV2RangeGiven sets the optional range_given attribute to value.
//
// value: Whether the range is given or should be determined from the `input` tensor.
// If not specified, defaults to false
func QuantizeAndDequantizeV2RangeGiven(value bool) QuantizeAndDequantizeV2Attr {
	return func(m optionalAttr) {
		m["range_given"] = value
	}
}

// QuantizeAndDequantizeV2RoundMode sets the optional round_mode attribute to value.
//
// value: The 'round_mode' attribute controls which rounding tie-breaking algorithm is
// used when rounding float values to their quantized equivalents. The following
// rounding modes are currently supported:
//
// *   HALF_TO_EVEN: this is the default round_mode.
// *   HALF_UP: round towards positive. In this mode 7.5 rounds up to 8 and -7.5
//     rounds up to -7.
//
// If not specified, defaults to "HALF_TO_EVEN"
func QuantizeAndDequantizeV2RoundMode(value string) QuantizeAndDequantizeV2Attr {
	return func(m optionalAttr) {
		m["round_mode"] = value
	}
}

// QuantizeAndDequantizeV2NarrowRange sets the optional narrow_range attribute to value.
//
// value: If True, then the absolute value of the quantized minimum value is the same as
// the quantized maximum value, instead of 1 greater.
// i.e. for 8 bit quantization, the minimum value is -127 instead of -128.
// If not specified, defaults to false
func QuantizeAndDequantizeV2NarrowRange(value bool) QuantizeAndDequantizeV2Attr {
	return func(m optionalAttr) {
		m["narrow_range"] = value
	}
}

// QuantizeAndDequantizeV2Axis sets the optional axis attribute to value.
//
// value: If specified, this axis is treated as a channel or slice axis, and a separate
// quantization range is used for each channel or slice along this axis.
// If not specified, defaults to -1
func QuantizeAndDequantizeV2Axis(value int64) QuantizeAndDequantizeV2Attr {
	return func(m optionalAttr) {
		m["axis"] = value
	}
}

// Quantizes then dequantizes a tensor.
//
// This op simulates the precision loss from the quantized forward pass by:
//
// 1. Quantizing the tensor to fixed point numbers, which should match the target
//    quantization method when it is used in inference.
// 2. Dequantizing it back to floating point numbers for the following ops, most
//    likely matmul.
//
// There are different ways to quantize. This version uses only scaling, so 0.0
// maps to 0.
//
// From the specified 'num_bits' in the quantized output type, it determines
// minimum and maximum representable quantized values.
//
// e.g.
//
// *   [-128, 127] for signed, num_bits = 8, or
// *   [0, 255] for unsigned, num_bits = 8.
//
// If range_given == False, the initial input_min, input_max will be determined
// automatically as the minimum and maximum values in the input tensor, otherwise
// the specified values of input_min, input_max are used.
//
// Note: If the input_min, input_max are specified, they do not need to equal the
// actual minimum and maximum values in the tensor. e.g. in some cases it may be
// beneficial to specify these values such that the low probability extremes of the
// input distribution are clipped.
//
// This op determines the maximum scale_factor that would map the initial
// [input_min, input_max] range to a range that lies within the representable
// quantized range.
//
// It determines the scale from one of input_min and input_max, then updates the
// other one to maximize the representable range.
//
// e.g.
//
// *   if the output is signed, num_bits = 8, [input_min, input_max] = [-10.0,
//     5.0]: it would use a scale_factor of -128 / -10.0 = 12.8. In this case, it
//     would update input_max to be 127 / 12.8 = 9.921875.
// *   if the output is signed, num_bits = 8, [input_min, input_max] = [-10.0,
//     10.0]: it would use a scale_factor of 127 / 10.0 = 12.7. In this case, it
//     would update input_min to be -128.0 / 12.7 = -10.07874.
// *   if the output is unsigned, input_min is forced to be 0, and only the
//     specified input_max is used.
//
// After determining the scale_factor and updating the input range, it applies the
// following to each value in the 'input' tensor:
//
// output = round(clamp(value, input_min, input_max) * scale_factor) / scale_factor.
//
// The above round function rounds the value based on the given round_mode.
//
// Arguments:
//	input: Tensor to quantize and then dequantize.
//	input_min: If `range_given == True`, this specifies the minimum input value that needs to
// be represented, otherwise it is determined from the min value of the `input`
// tensor.
//	input_max: If `range_given == True`, this specifies the maximum input value that needs to
// be represented, otherwise it is determined from the max value of the `input`
// tensor.
func QuantizeAndDequantizeV2(scope *Scope, input tf.Output, input_min tf.Output, input_max tf.Output, optional ...QuantizeAndDequantizeV2Attr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "QuantizeAndDequantizeV2",
		Input: []tf.Input{
			input, input_min, input_max,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

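// exampleQuantizeAndDequantizeV2 is a sketch (illustrative values only) of
// fake quantization with a caller-supplied range: range_given=true makes
// the op use input_min/input_max instead of scanning the input tensor.
func exampleQuantizeAndDequantizeV2(s *Scope, x tf.Output) tf.Output {
	min := Const(s, float32(-10.0))
	max := Const(s, float32(5.0))
	return QuantizeAndDequantizeV2(s, x, min, max,
		QuantizeAndDequantizeV2NumBits(8),
		QuantizeAndDequantizeV2RangeGiven(true))
}
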
// QuantizeAndDequantizeAttr is an optional argument to QuantizeAndDequantize.
type QuantizeAndDequantizeAttr func(optionalAttr)

// QuantizeAndDequantizeSignedInput sets the optional signed_input attribute to value.
// If not specified, defaults to true
func QuantizeAndDequantizeSignedInput(value bool) QuantizeAndDequantizeAttr {
	return func(m optionalAttr) {
		m["signed_input"] = value
	}
}

// QuantizeAndDequantizeNumBits sets the optional num_bits attribute to value.
// If not specified, defaults to 8
func QuantizeAndDequantizeNumBits(value int64) QuantizeAndDequantizeAttr {
	return func(m optionalAttr) {
		m["num_bits"] = value
	}
}

// QuantizeAndDequantizeRangeGiven sets the optional range_given attribute to value.
// If not specified, defaults to false
func QuantizeAndDequantizeRangeGiven(value bool) QuantizeAndDequantizeAttr {
	return func(m optionalAttr) {
		m["range_given"] = value
	}
}

// QuantizeAndDequantizeInputMin sets the optional input_min attribute to value.
// If not specified, defaults to 0
func QuantizeAndDequantizeInputMin(value float32) QuantizeAndDequantizeAttr {
	return func(m optionalAttr) {
		m["input_min"] = value
	}
}

// QuantizeAndDequantizeInputMax sets the optional input_max attribute to value.
// If not specified, defaults to 0
func QuantizeAndDequantizeInputMax(value float32) QuantizeAndDequantizeAttr {
	return func(m optionalAttr) {
		m["input_max"] = value
	}
}

// Use QuantizeAndDequantizeV2 instead.
//
// DEPRECATED at GraphDef version 22: Replaced by QuantizeAndDequantizeV2
func QuantizeAndDequantize(scope *Scope, input tf.Output, optional ...QuantizeAndDequantizeAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "QuantizeAndDequantize",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// OneHotAttr is an optional argument to OneHot.
type OneHotAttr func(optionalAttr)

// OneHotAxis sets the optional axis attribute to value.
//
// value: The axis to fill (default: -1, a new inner-most axis).
// If not specified, defaults to -1
func OneHotAxis(value int64) OneHotAttr {
	return func(m optionalAttr) {
		m["axis"] = value
	}
}

// Returns a one-hot tensor.
//
// The locations represented by indices in `indices` take value `on_value`,
// while all other locations take value `off_value`.
//
// If the input `indices` is rank `N`, the output will have rank `N+1`.
// The new axis is created at dimension `axis` (default: the new axis is
// appended at the end).
//
// If `indices` is a scalar the output shape will be a vector of length `depth`.
//
// If `indices` is a vector of length `features`, the output shape will be:
// ```
//   features x depth if axis == -1
//   depth x features if axis == 0
// ```
//
// If `indices` is a matrix (batch) with shape `[batch, features]`,
// the output shape will be:
// ```
//   batch x features x depth if axis == -1
//   batch x depth x features if axis == 1
//   depth x batch x features if axis == 0
// ```
//
// Examples
// =========
//
// Suppose that
// ```
//   indices = [0, 2, -1, 1]
//   depth = 3
//   on_value = 5.0
//   off_value = 0.0
//   axis = -1
// ```
//
// Then output is `[4 x 3]`:
// ```
// output =
//   [5.0 0.0 0.0]  // one_hot(0)
//   [0.0 0.0 5.0]  // one_hot(2)
//   [0.0 0.0 0.0]  // one_hot(-1)
//   [0.0 5.0 0.0]  // one_hot(1)
// ```
//
// Suppose that
// ```
//   indices = [0, 2, -1, 1]
//   depth = 3
//   on_value = 0.0
//   off_value = 3.0
//   axis = 0
// ```
//
// Then output is `[3 x 4]`:
// ```
// output =
//   [0.0 3.0 3.0 3.0]
//   [3.0 3.0 3.0 0.0]
//   [3.0 0.0 3.0 3.0]
// //  ^                one_hot(0)
// //      ^            one_hot(2)
// //          ^        one_hot(-1)
// //              ^    one_hot(1)
// ```
//
// Suppose that
// ```
//   indices = [[0, 2], [1, -1]]
//   depth = 3
//   on_value = 1.0
//   off_value = 0.0
//   axis = -1
// ```
//
// Then output is `[2 x 2 x 3]`:
// ```
// output =
//   [
//     [1.0, 0.0, 0.0]  // one_hot(0)
//     [0.0, 0.0, 1.0]  // one_hot(2)
//   ][
//     [0.0, 1.0, 0.0]  // one_hot(1)
//     [0.0, 0.0, 0.0]  // one_hot(-1)
//   ]
// ```
//
// Arguments:
//	indices: A tensor of indices.
//	depth: A scalar defining the depth of the one hot dimension.
//	on_value: A scalar defining the value to fill in output when `indices[j] = i`.
//	off_value: A scalar defining the value to fill in output when `indices[j] != i`.
//
// Returns The one-hot tensor.
func OneHot(scope *Scope, indices tf.Output, depth tf.Output, on_value tf.Output, off_value tf.Output, optional ...OneHotAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "OneHot",
		Input: []tf.Input{
			indices, depth, on_value, off_value,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

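// exampleOneHot reproduces the first example above as a sketch (assuming
// NewScope and Const from this package): depth 3, on_value 5.0, off_value
// 0.0 and axis -1 turn indices [0, 2, -1, 1] into a [4 x 3] one-hot matrix.
func exampleOneHot() tf.Output {
	s := NewScope()
	indices := Const(s, []int32{0, 2, -1, 1})
	depth := Const(s, int32(3))
	on := Const(s, float32(5.0))
	off := Const(s, float32(0.0))
	return OneHot(s, indices, depth, on, off, OneHotAxis(-1))
}
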
// Extract `patches` from `input` and put them in the `"depth"` output dimension. 3D extension of `extract_image_patches`.
//
// Arguments:
//	input: 5-D Tensor with shape `[batch, in_planes, in_rows, in_cols, depth]`.
//	ksizes: The size of the sliding window for each dimension of `input`.
//	strides: 1-D of length 5. How far the centers of two consecutive patches are in
// `input`. Must be: `[1, stride_planes, stride_rows, stride_cols, 1]`.
//	padding: The type of padding algorithm to use.
//
// The size-related attributes are specified as follows:
//
// ```python
// ksizes = [1, ksize_planes, ksize_rows, ksize_cols, 1]
// strides = [1, stride_planes, stride_rows, stride_cols, 1]
// ```
//
// Returns 5-D Tensor with shape `[batch, out_planes, out_rows, out_cols,
// ksize_planes * ksize_rows * ksize_cols * depth]` containing patches
// with size `ksize_planes x ksize_rows x ksize_cols x depth` vectorized
// in the "depth" dimension. Note `out_planes`, `out_rows` and `out_cols`
// are the dimensions of the output patches.
func ExtractVolumePatches(scope *Scope, input tf.Output, ksizes []int64, strides []int64, padding string) (patches tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"ksizes": ksizes, "strides": strides, "padding": padding}
	opspec := tf.OpSpec{
		Type: "ExtractVolumePatches",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// DepthToSpaceAttr is an optional argument to DepthToSpace.
type DepthToSpaceAttr func(optionalAttr)

// DepthToSpaceDataFormat sets the optional data_format attribute to value.
// If not specified, defaults to "NHWC"
func DepthToSpaceDataFormat(value string) DepthToSpaceAttr {
	return func(m optionalAttr) {
		m["data_format"] = value
	}
}

// DepthToSpace for tensors of type T.
//
// Rearranges data from depth into blocks of spatial data.
// This is the reverse transformation of SpaceToDepth. More specifically,
// this op outputs a copy of the input tensor where values from the `depth`
// dimension are moved in spatial blocks to the `height` and `width` dimensions.
// The attr `block_size` indicates the input block size and how the data is moved.
//
//   * Chunks of data of size `block_size * block_size` from depth are rearranged
//     into non-overlapping blocks of size `block_size x block_size`
//   * The width of the output tensor is `input_width * block_size`, whereas the
//     height is `input_height * block_size`.
//   * The Y, X coordinates within each block of the output image are determined
//     by the high order component of the input channel index.
//   * The depth of the input tensor must be divisible by
//     `block_size * block_size`.
//
// The `data_format` attr specifies the layout of the input and output tensors
// with the following options:
//   "NHWC": `[ batch, height, width, channels ]`
//   "NCHW": `[ batch, channels, height, width ]`
//   "NCHW_VECT_C":
//       `qint8 [ batch, channels / 4, height, width, 4 ]`
//
// It is useful to consider the operation as transforming a 6-D Tensor.
// e.g. for data_format = NHWC,
//      Each element in the input tensor can be specified via 6 coordinates,
//      ordered by decreasing memory layout significance as:
//      n,iY,iX,bY,bX,oC  (where n=batch index, iX, iY means X or Y coordinates
//                         within the input image, bX, bY means coordinates
//                         within the output block, oC means output channels).
//      The output would be the input transposed to the following layout:
//      n,iY,bY,iX,bX,oC
//
// This operation is useful for resizing the activations between convolutions
// (but keeping all data), e.g. instead of pooling. It is also useful for training
// purely convolutional models.
//
// For example, given an input of shape `[1, 1, 1, 4]`, data_format = "NHWC" and
// block_size = 2:
//
// ```
// x = [[[[1, 2, 3, 4]]]]
// ```
//
// This operation will output a tensor of shape `[1, 2, 2, 1]`:
//
// ```
//    [[[[1], [2]],
//      [[3], [4]]]]
// ```
//
// Here, the input has a batch of 1 and each batch element has shape `[1, 1, 4]`,
// the corresponding output will have 2x2 elements and will have a depth of
// 1 channel (1 = `4 / (block_size * block_size)`).
// The output element shape is `[2, 2, 1]`.
//
// For an input tensor with larger depth, here of shape `[1, 1, 1, 12]`, e.g.
//
// ```
// x = [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]]
// ```
//
// This operation, for block size of 2, will return the following tensor of shape
// `[1, 2, 2, 3]`
//
// ```
//    [[[[1, 2, 3], [4, 5, 6]],
//      [[7, 8, 9], [10, 11, 12]]]]
// ```
//
// Similarly, for the following input of shape `[1 2 2 4]`, and a block size of 2:
//
// ```
// x = [[[[1, 2, 3, 4],
//        [5, 6, 7, 8]],
//       [[9, 10, 11, 12],
//        [13, 14, 15, 16]]]]
// ```
//
// the operator will return the following tensor of shape `[1 4 4 1]`:
//
// ```
// x = [[[ [1],   [2],  [5],  [6]],
//       [ [3],   [4],  [7],  [8]],
//       [ [9],  [10], [13],  [14]],
//       [ [11], [12], [15],  [16]]]]
// ```
//
// Arguments:
//
//	block_size: The size of the spatial block, same as in Space2Depth.
func DepthToSpace(scope *Scope, input tf.Output, block_size int64, optional ...DepthToSpaceAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"block_size": block_size}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "DepthToSpace",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

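// exampleDepthToSpace sketches the first example above (assuming NewScope
// and Const from this package): a [1, 1, 1, 4] input with block_size 2
// becomes a [1, 2, 2, 1] output.
func exampleDepthToSpace() tf.Output {
	s := NewScope()
	x := Const(s, [][][][]float32{{{{1, 2, 3, 4}}}})
	return DepthToSpace(s, x, 2, DepthToSpaceDataFormat("NHWC"))
}
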
// SpaceToDepthAttr is an optional argument to SpaceToDepth.
type SpaceToDepthAttr func(optionalAttr)

// SpaceToDepthDataFormat sets the optional data_format attribute to value.
// If not specified, defaults to "NHWC"
func SpaceToDepthDataFormat(value string) SpaceToDepthAttr {
	return func(m optionalAttr) {
		m["data_format"] = value
	}
}

// SpaceToDepth for tensors of type T.
//
// Rearranges blocks of spatial data into depth. More specifically,
// this op outputs a copy of the input tensor where values from the `height`
// and `width` dimensions are moved to the `depth` dimension.
// The attr `block_size` indicates the input block size.
//
//   * Non-overlapping blocks of size `block_size x block_size` are rearranged
//     into depth at each location.
//   * The depth of the output tensor is `block_size * block_size * input_depth`.
//   * The Y, X coordinates within each block of the input become the high order
//     component of the output channel index.
//   * The input tensor's height and width must be divisible by block_size.
//
// The `data_format` attr specifies the layout of the input and output tensors
// with the following options:
//   "NHWC": `[ batch, height, width, channels ]`
//   "NCHW": `[ batch, channels, height, width ]`
//   "NCHW_VECT_C":
//       `qint8 [ batch, channels / 4, height, width, 4 ]`
//
// It is useful to consider the operation as transforming a 6-D Tensor.
// e.g. for data_format = NHWC,
//      Each element in the input tensor can be specified via 6 coordinates,
//      ordered by decreasing memory layout significance as:
//      n,oY,bY,oX,bX,iC  (where n=batch index, oX, oY means X or Y coordinates
//                         within the output image, bX, bY means coordinates
//                         within the input block, iC means input channels).
//      The output would be a transpose to the following layout:
//      n,oY,oX,bY,bX,iC
//
// This operation is useful for resizing the activations between convolutions
// (but keeping all data), e.g. instead of pooling. It is also useful for training
// purely convolutional models.
//
// For example, given an input of shape `[1, 2, 2, 1]`, data_format = "NHWC" and
// block_size = 2:
//
// ```
// x = [[[[1], [2]],
//       [[3], [4]]]]
// ```
//
// This operation will output a tensor of shape `[1, 1, 1, 4]`:
//
// ```
// [[[[1, 2, 3, 4]]]]
// ```
//
// Here, the input has a batch of 1 and each batch element has shape `[2, 2, 1]`,
// the corresponding output will have a single element (i.e. width and height are
// both 1) and will have a depth of 4 channels (1 * block_size * block_size).
// The output element shape is `[1, 1, 4]`.
//
// For an input tensor with larger depth, here of shape `[1, 2, 2, 3]`, e.g.
//
// ```
// x = [[[[1, 2, 3], [4, 5, 6]],
//       [[7, 8, 9], [10, 11, 12]]]]
// ```
//
// This operation, for block_size of 2, will return the following tensor of shape
// `[1, 1, 1, 12]`
//
// ```
// [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]]
// ```
//
// Similarly, for the following input of shape `[1 4 4 1]`, and a block size of 2:
//
// ```
// x = [[[[1],   [2],  [5],  [6]],
//       [[3],   [4],  [7],  [8]],
//       [[9],  [10], [13],  [14]],
//       [[11], [12], [15],  [16]]]]
// ```
//
// the operator will return the following tensor of shape `[1 2 2 4]`:
//
// ```
// x = [[[[1, 2, 3, 4],
//        [5, 6, 7, 8]],
//       [[9, 10, 11, 12],
//        [13, 14, 15, 16]]]]
// ```
//
// Arguments:
//
//	block_size: The size of the spatial block.
func SpaceToDepth(scope *Scope, input tf.Output, block_size int64, optional ...SpaceToDepthAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"block_size": block_size}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "SpaceToDepth",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

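// roundTripSpaceToDepth is a sketch of the inverse relationship described
// above: SpaceToDepth followed by DepthToSpace at the same block size is
// the identity on inputs whose height and width are divisible by the block
// size.
func roundTripSpaceToDepth(s *Scope, x tf.Output) tf.Output {
	return DepthToSpace(s, SpaceToDepth(s, x, 2), 2)
}
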
// BatchToSpace for 4-D tensors of type T.
//
// This is a legacy version of the more general BatchToSpaceND.
//
// Rearranges (permutes) data from batch into blocks of spatial data, followed by
// cropping. This is the reverse transformation of SpaceToBatch. More specifically,
// this op outputs a copy of the input tensor where values from the `batch`
// dimension are moved in spatial blocks to the `height` and `width` dimensions,
// followed by cropping along the `height` and `width` dimensions.
//
// Arguments:
//	input: 4-D tensor with shape
// `[batch*block_size*block_size, height_pad/block_size, width_pad/block_size,
//   depth]`. Note that the batch size of the input tensor must be divisible by
// `block_size * block_size`.
//	crops: 2-D tensor of non-negative integers with shape `[2, 2]`. It specifies
// how many elements to crop from the intermediate result across the spatial
// dimensions as follows:
//
//     crops = [[crop_top, crop_bottom], [crop_left, crop_right]]
//
// Returns 4-D with shape `[batch, height, width, depth]`, where:
//
//       height = height_pad - crop_top - crop_bottom
//       width = width_pad - crop_left - crop_right
//
// The attr `block_size` must be greater than one. It indicates the block size.
//
// Some examples:
//
// (1) For the following input of shape `[4, 1, 1, 1]` and block_size of 2:
//
// ```
// [[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
// ```
//
// The output tensor has shape `[1, 2, 2, 1]` and value:
//
// ```
// x = [[[[1], [2]], [[3], [4]]]]
// ```
//
// (2) For the following input of shape `[4, 1, 1, 3]` and block_size of 2:
//
// ```
// [[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]]
// ```
//
// The output tensor has shape `[1, 2, 2, 3]` and value:
//
// ```
// x = [[[[1, 2, 3], [4, 5, 6]],
//       [[7, 8, 9], [10, 11, 12]]]]
// ```
//
// (3) For the following input of shape `[4, 2, 2, 1]` and block_size of 2:
//
// ```
// x = [[[[1], [3]], [[9], [11]]],
//      [[[2], [4]], [[10], [12]]],
//      [[[5], [7]], [[13], [15]]],
//      [[[6], [8]], [[14], [16]]]]
// ```
//
// The output tensor has shape `[1, 4, 4, 1]` and value:
//
// ```
// x = [[[[1],   [2],  [3],  [4]],
//       [[5],   [6],  [7],  [8]],
//       [[9],  [10], [11],  [12]],
//       [[13], [14], [15],  [16]]]]
// ```
//
// (4) For the following input of shape `[8, 1, 2, 1]` and block_size of 2:
//
// ```
// x = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]],
//      [[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]]
// ```
//
// The output tensor has shape `[2, 2, 4, 1]` and value:
//
// ```
// x = [[[[1],   [2],  [3],  [4]],
//       [[5],   [6],  [7],  [8]]],
//      [[[9],  [10], [11],  [12]],
//       [[13], [14], [15],  [16]]]]
// ```
func BatchToSpace(scope *Scope, input tf.Output, crops tf.Output, block_size int64) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"block_size": block_size}
	opspec := tf.OpSpec{
		Type: "BatchToSpace",
		Input: []tf.Input{
			input, crops,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

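// exampleBatchToSpace sketches example (1) above (assuming NewScope and
// Const from this package): a [4, 1, 1, 1] input with zero crops and
// block_size 2 yields a [1, 2, 2, 1] output.
func exampleBatchToSpace() tf.Output {
	s := NewScope()
	x := Const(s, [][][][]int32{{{{1}}}, {{{2}}}, {{{3}}}, {{{4}}}})
	crops := Const(s, [][]int32{{0, 0}, {0, 0}})
	return BatchToSpace(s, x, crops, 2)
}
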
// SpaceToBatch for 4-D tensors of type T.
//
// This is a legacy version of the more general SpaceToBatchND.
//
// Zero-pads and then rearranges (permutes) blocks of spatial data into batch.
// More specifically, this op outputs a copy of the input tensor where values from
// the `height` and `width` dimensions are moved to the `batch` dimension. After
// the zero-padding, both `height` and `width` of the input must be divisible by the
// block size.
//
// Arguments:
//	input: 4-D with shape `[batch, height, width, depth]`.
//	paddings: 2-D tensor of non-negative integers with shape `[2, 2]`. It specifies
//   the padding of the input with zeros across the spatial dimensions as follows:
//
//       paddings = [[pad_top, pad_bottom], [pad_left, pad_right]]
//
//   The effective spatial dimensions of the zero-padded input tensor will be:
//
//       height_pad = pad_top + height + pad_bottom
//       width_pad = pad_left + width + pad_right
//
// The attr `block_size` must be greater than one. It indicates the block size.
//
//   * Non-overlapping blocks of size `block_size x block_size` in the height and
//     width dimensions are rearranged into the batch dimension at each location.
//   * The batch of the output tensor is `batch * block_size * block_size`.
//   * Both height_pad and width_pad must be divisible by block_size.
//
// The shape of the output will be:
//
//     [batch*block_size*block_size, height_pad/block_size, width_pad/block_size,
//      depth]
//
// Some examples:
//
// (1) For the following input of shape `[1, 2, 2, 1]` and block_size of 2:
//
// ```
// x = [[[[1], [2]], [[3], [4]]]]
// ```
//
// The output tensor has shape `[4, 1, 1, 1]` and value:
//
// ```
// [[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
// ```
//
// (2) For the following input of shape `[1, 2, 2, 3]` and block_size of 2:
//
// ```
// x = [[[[1, 2, 3], [4, 5, 6]],
//       [[7, 8, 9], [10, 11, 12]]]]
// ```
//
// The output tensor has shape `[4, 1, 1, 3]` and value:
//
// ```
// [[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]]
// ```
//
// (3) For the following input of shape `[1, 4, 4, 1]` and block_size of 2:
//
// ```
// x = [[[[1],   [2],  [3],  [4]],
//       [[5],   [6],  [7],  [8]],
//       [[9],  [10], [11],  [12]],
//       [[13], [14], [15],  [16]]]]
// ```
//
// The output tensor has shape `[4, 2, 2, 1]` and value:
//
// ```
// x = [[[[1], [3]], [[9], [11]]],
//      [[[2], [4]], [[10], [12]]],
//      [[[5], [7]], [[13], [15]]],
//      [[[6], [8]], [[14], [16]]]]
// ```
//
// (4) For the following input of shape `[2, 2, 4, 1]` and block_size of 2:
//
// ```
// x = [[[[1],   [2],  [3],  [4]],
//       [[5],   [6],  [7],  [8]]],
//      [[[9],  [10], [11],  [12]],
//       [[13], [14], [15],  [16]]]]
// ```
//
// The output tensor has shape `[8, 1, 2, 1]` and value:
//
// ```
// x = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]],
//      [[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]]
// ```
//
// Among others, this operation is useful for reducing atrous convolution into
// regular convolution.
//
func SpaceToBatch(scope *Scope, input tf.Output, paddings tf.Output, block_size int64) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"block_size": block_size}
	opspec := tf.OpSpec{
		Type: "SpaceToBatch",
		Input: []tf.Input{
			input, paddings,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

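// spaceBatchRoundTrip is a sketch of the inverse relationship between the
// two ops: BatchToSpace with zero crops undoes SpaceToBatch with zero
// paddings at the same block size.
func spaceBatchRoundTrip(s *Scope, x tf.Output) tf.Output {
	zero := Const(s, [][]int32{{0, 0}, {0, 0}})
	return BatchToSpace(s, SpaceToBatch(s, x, zero, 2), zero, 2)
}
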
// SqueezeAttr is an optional argument to Squeeze.
type SqueezeAttr func(optionalAttr)

// SqueezeAxis sets the optional axis attribute to value.
//
// value: If specified, only squeezes the dimensions listed. The dimension
// index starts at 0. It is an error to squeeze a dimension that is not 1. Must
// be in the range `[-rank(input), rank(input))`.
// If not specified, defaults to an empty list.
//
// REQUIRES: len(value) >= 0
func SqueezeAxis(value []int64) SqueezeAttr {
	return func(m optionalAttr) {
		m["squeeze_dims"] = value
	}
}

// Removes dimensions of size 1 from the shape of a tensor.
//
// Given a tensor `input`, this operation returns a tensor of the same type with
// all dimensions of size 1 removed. If you don't want to remove all size 1
// dimensions, you can remove specific size 1 dimensions by specifying
// `axis`.
//
// For example:
//
// ```
// # 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
// shape(squeeze(t)) ==> [2, 3]
// ```
//
// Or, to remove specific size 1 dimensions:
//
// ```
// # 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
// shape(squeeze(t, [2, 4])) ==> [1, 2, 3, 1]
// ```
//
// Arguments:
//	input: The `input` to squeeze.
//
// Returns Contains the same data as `input`, but has one or more dimensions of
// size 1 removed.
func Squeeze(scope *Scope, input tf.Output, optional ...SqueezeAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "Squeeze",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

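// exampleSqueeze mirrors the second shape example above as a sketch:
// squeezing only dims 2 and 4 of a [1, 2, 1, 3, 1, 1] tensor leaves
// shape [1, 2, 3, 1].
func exampleSqueeze(s *Scope, t tf.Output) tf.Output {
	return Squeeze(s, t, SqueezeAxis([]int64{2, 4}))
}
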
// A placeholder op that passes through `input` when its output is not fed.
//
// Arguments:
//	input: The default value to produce when `output` is not fed.
//	shape: The (possibly partial) shape of the tensor.
//
// Returns A placeholder tensor that defaults to `input` if it is not fed.
func PlaceholderWithDefault(scope *Scope, input tf.Output, shape tf.Shape) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"shape": shape}
	opspec := tf.OpSpec{
		Type: "PlaceholderWithDefault",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// PlaceholderAttr is an optional argument to Placeholder.
type PlaceholderAttr func(optionalAttr)

// PlaceholderShape sets the optional shape attribute to value.
//
// value: (Optional) The shape of the tensor. If the shape has 0 dimensions, the
// shape is unconstrained.
// If not specified, defaults to an unknown shape (unknown rank).
func PlaceholderShape(value tf.Shape) PlaceholderAttr {
	return func(m optionalAttr) {
		m["shape"] = value
	}
}

1566// A placeholder op for a value that will be fed into the computation.
1567//
1568// N.B. This operation will fail with an error if it is executed. It is
1569// intended as a way to represent a value that will always be fed, and to
1570// provide attrs that enable the fed value to be checked at runtime.
1571//
1572// Arguments:
1573//	dtype: The type of elements in the tensor.
1574//
1575// Returns A placeholder tensor that must be replaced using the feed mechanism.
1576func Placeholder(scope *Scope, dtype tf.DataType, optional ...PlaceholderAttr) (output tf.Output) {
1577	if scope.Err() != nil {
1578		return
1579	}
1580	attrs := map[string]interface{}{"dtype": dtype}
1581	for _, a := range optional {
1582		a(attrs)
1583	}
1584	opspec := tf.OpSpec{
1585		Type: "Placeholder",
1586
1587		Attrs: attrs,
1588	}
1589	op := scope.AddOperation(opspec)
1590	return op.Output(0)
1591}
1592
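// A hand-written sketch (not generated code) of how Placeholder and
// PlaceholderWithDefault above might be used together. The function name is
// hypothetical; tf.MakeShape comes from the tensorflow/go package, with -1
// marking an unknown dimension.
func examplePlaceholderSketch() (tf.Output, tf.Output) {
	s := NewScope()
	// A float placeholder whose value must always be fed at run time.
	fed := Placeholder(s, tf.Float, PlaceholderShape(tf.MakeShape(-1, 3)))
	// A placeholder that falls back to a constant when nothing is fed.
	withDefault := PlaceholderWithDefault(s, Const(s, []float32{1, 2, 3}), tf.MakeShape(3))
	return fed, withDefault
}
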
// Return the reduction indices for computing gradients of s0 op s1 with broadcast.
//
// This is typically used by gradient computations for a broadcasting operation.
func BroadcastGradientArgs(scope *Scope, s0 tf.Output, s1 tf.Output) (r0 tf.Output, r1 tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "BroadcastGradientArgs",
		Input: []tf.Input{
			s0, s1,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1)
}

// Return the shape of s0 op s1 with broadcast.
//
// Given `s0` and `s1`, tensors that represent shapes, compute `r0`, the
// broadcasted shape. `s0`, `s1` and `r0` are all integer vectors.
func BroadcastArgs(scope *Scope, s0 tf.Output, s1 tf.Output) (r0 tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "BroadcastArgs",
		Input: []tf.Input{
			s0, s1,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

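// A hand-written sketch (not generated code) of BroadcastArgs: given two
// shape vectors, it yields their broadcasted shape. The function name is
// hypothetical; run in a session, the result would be [2, 3, 5].
func exampleBroadcastArgsSketch() tf.Output {
	s := NewScope()
	s0 := Const(s, []int32{2, 1, 5})
	s1 := Const(s, []int32{1, 3, 5})
	return BroadcastArgs(s, s0, s1)
}
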
// TensorStridedSliceUpdateAttr is an optional argument to TensorStridedSliceUpdate.
type TensorStridedSliceUpdateAttr func(optionalAttr)

// TensorStridedSliceUpdateBeginMask sets the optional begin_mask attribute to value.
// If not specified, defaults to 0
func TensorStridedSliceUpdateBeginMask(value int64) TensorStridedSliceUpdateAttr {
	return func(m optionalAttr) {
		m["begin_mask"] = value
	}
}

// TensorStridedSliceUpdateEndMask sets the optional end_mask attribute to value.
// If not specified, defaults to 0
func TensorStridedSliceUpdateEndMask(value int64) TensorStridedSliceUpdateAttr {
	return func(m optionalAttr) {
		m["end_mask"] = value
	}
}

// TensorStridedSliceUpdateEllipsisMask sets the optional ellipsis_mask attribute to value.
// If not specified, defaults to 0
func TensorStridedSliceUpdateEllipsisMask(value int64) TensorStridedSliceUpdateAttr {
	return func(m optionalAttr) {
		m["ellipsis_mask"] = value
	}
}

// TensorStridedSliceUpdateNewAxisMask sets the optional new_axis_mask attribute to value.
// If not specified, defaults to 0
func TensorStridedSliceUpdateNewAxisMask(value int64) TensorStridedSliceUpdateAttr {
	return func(m optionalAttr) {
		m["new_axis_mask"] = value
	}
}

// TensorStridedSliceUpdateShrinkAxisMask sets the optional shrink_axis_mask attribute to value.
// If not specified, defaults to 0
func TensorStridedSliceUpdateShrinkAxisMask(value int64) TensorStridedSliceUpdateAttr {
	return func(m optionalAttr) {
		m["shrink_axis_mask"] = value
	}
}

// Assign `value` to the sliced l-value reference of `input`.
//
// The values of `value` are assigned to the positions in the tensor `input` that
// are selected by the slice parameters. The slice parameters `begin`, `end`,
// `strides`, etc. work exactly as in `StridedSlice`.
//
// NOTE: this op currently does not support broadcasting, so `value`'s shape
// must be exactly the shape produced by the slice of `input`.
func TensorStridedSliceUpdate(scope *Scope, input tf.Output, begin tf.Output, end tf.Output, strides tf.Output, value tf.Output, optional ...TensorStridedSliceUpdateAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "TensorStridedSliceUpdate",
		Input: []tf.Input{
			input, begin, end, strides, value,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Computes the eigen decomposition of a batch of self-adjoint matrices
//
// (Note: Only real inputs are supported).
//
// Computes the eigenvalues and eigenvectors of the innermost N-by-N matrices in
// tensor such that tensor[..., :, :] * v[..., :, i] = e[..., i] * v[..., :, i], for
// i=0...N-1.
//
// Arguments:
//	a: the input tensor.
//	lower: a boolean specifying whether the calculation is done with the lower
// triangular part or the upper triangular part.
//	max_iter: maximum number of sweep updates, i.e., passes over the whole lower
// triangular part or upper triangular part based on parameter lower. Heuristically,
// it has been argued that approximately log(N) sweeps are needed in practice
// (Ref: Golub & van Loan, "Matrix Computations").
//	epsilon: the tolerance ratio.
//
// Returns:
//	w: The eigenvalues in ascending order, each repeated according to its
// multiplicity.
//	v: The column v[..., :, i] is the normalized eigenvector corresponding to the
// eigenvalue w[..., i].
func XlaSelfAdjointEig(scope *Scope, a tf.Output, lower bool, max_iter int64, epsilon float32) (w tf.Output, v tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"lower": lower, "max_iter": max_iter, "epsilon": epsilon}
	opspec := tf.OpSpec{
		Type: "XlaSelfAdjointEig",
		Input: []tf.Input{
			a,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1)
}

// Ensures that the tensor's shape matches the expected shape.
//
// Raises an error if the input tensor's shape does not match the specified shape.
// Returns the input tensor otherwise.
//
// Arguments:
//	input: A tensor, whose shape is to be validated.
//	shape: The expected (possibly partially specified) shape of the input tensor.
//
// Returns A tensor with the same shape and contents as the input tensor.
func EnsureShape(scope *Scope, input tf.Output, shape tf.Shape) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"shape": shape}
	opspec := tf.OpSpec{
		Type: "EnsureShape",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// ShapeAttr is an optional argument to Shape.
type ShapeAttr func(optionalAttr)

// ShapeOutType sets the optional out_type attribute to value.
// If not specified, defaults to DT_INT32
func ShapeOutType(value tf.DataType) ShapeAttr {
	return func(m optionalAttr) {
		m["out_type"] = value
	}
}

// Returns the shape of a tensor.
//
// This operation returns a 1-D integer tensor representing the shape of `input`.
//
// For example:
//
// ```
// # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
// shape(t) ==> [2, 2, 3]
// ```
func Shape(scope *Scope, input tf.Output, optional ...ShapeAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "Shape",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// UniqueWithCountsV2Attr is an optional argument to UniqueWithCountsV2.
type UniqueWithCountsV2Attr func(optionalAttr)

// UniqueWithCountsV2OutIdx sets the optional out_idx attribute to value.
// If not specified, defaults to DT_INT32
func UniqueWithCountsV2OutIdx(value tf.DataType) UniqueWithCountsV2Attr {
	return func(m optionalAttr) {
		m["out_idx"] = value
	}
}

// Finds unique elements along an axis of a tensor.
//
// This operation returns a tensor `y` containing the unique elements
// along the `axis` of a tensor. The returned unique elements are sorted
// in the same order as they occur along `axis` in `x`.
// This operation also returns a tensor `idx` and a tensor `count`
// that are the same size as the number of elements in `x` along the
// `axis` dimension. The `idx` tensor contains the index of each element
// in the unique output `y`, and `count` contains the number of occurrences
// of each unique element.
// In other words, for a `1-D` tensor `x` with `axis = None`:
//
// `y[idx[i]] = x[i] for i in [0, 1,...,len(x) - 1]`
//
// For example:
//
// ```
// x = tf.constant([1, 1, 2, 4, 4, 4, 7, 8, 8])
// y, idx, count = UniqueWithCountsV2(x, axis = [0])
// y ==> [1, 2, 4, 7, 8]
// idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]
// count ==> [2, 1, 3, 1, 2]
// ```
//
// For a `2-D` tensor `x` with `axis = 0`:
//
// ```
// x = tf.constant([[1, 0, 0],
//                 [1, 0, 0],
//                 [2, 0, 0]])
// y, idx, count = UniqueWithCountsV2(x, axis=[0])
// y ==> [[1, 0, 0],
//        [2, 0, 0]]
// idx ==> [0, 0, 1]
// count ==> [2, 1]
// ```
//
// For a `2-D` tensor `x` with `axis = 1`:
//
// ```
// x = tf.constant([[1, 0, 0],
//                 [1, 0, 0],
//                 [2, 0, 0]])
// y, idx, count = UniqueWithCountsV2(x, axis=[1])
// y ==> [[1, 0],
//        [1, 0],
//        [2, 0]]
// idx ==> [0, 1, 1]
// count ==> [1, 2]
// ```
//
// Arguments:
//	x: A `Tensor`.
//	axis: A `Tensor` of type `int32` (default: None). The axis of the Tensor to
// find the unique elements.
//
// Returns:
//	y: A `Tensor`. Unique elements along the `axis` of `Tensor` x.
//	idx: A 1-D Tensor. Has the same type as x that contains the index of each
// value of x in the output y.
//	count: A 1-D Tensor. The count of each value of x in the output y.
func UniqueWithCountsV2(scope *Scope, x tf.Output, axis tf.Output, optional ...UniqueWithCountsV2Attr) (y tf.Output, idx tf.Output, count tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "UniqueWithCountsV2",
		Input: []tf.Input{
			x, axis,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}

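// A hand-written sketch (not generated code) mirroring the 1-D example in
// the comment above; the function name is hypothetical. Run in a session,
// y would be [1 2 4 7 8], idx [0 0 1 2 2 2 3 4 4], and count [2 1 3 1 2].
func exampleUniqueWithCountsV2Sketch() (tf.Output, tf.Output, tf.Output) {
	s := NewScope()
	x := Const(s, []int32{1, 1, 2, 4, 4, 4, 7, 8, 8})
	axis := Const(s, []int64{0})
	return UniqueWithCountsV2(s, x, axis)
}
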
// Shuffle dimensions of x according to a permutation and conjugate the result.
//
// The output `y` has the same rank as `x`. The shapes of `x` and `y` satisfy:
//   `y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]`
//   `y[i,j,k,...,s,t,u] == conj(x[perm[i], perm[j], perm[k],...,perm[s], perm[t], perm[u]])`
func ConjugateTranspose(scope *Scope, x tf.Output, perm tf.Output) (y tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "ConjugateTranspose",
		Input: []tf.Input{
			x, perm,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Computes the inverse permutation of a tensor.
//
// This operation computes the inverse of an index permutation. It takes a 1-D
// integer tensor `x`, which represents the indices of a zero-based array, and
// swaps each value with its index position. In other words, for an output tensor
// `y` and an input tensor `x`, this operation computes the following:
//
// `y[x[i]] = i for i in [0, 1, ..., len(x) - 1]`
//
// The values must include 0. There can be no duplicate values or negative values.
//
// For example:
//
// ```
// # tensor `x` is [3, 4, 0, 2, 1]
// invert_permutation(x) ==> [2, 4, 3, 0, 1]
// ```
//
// Arguments:
//	x: 1-D.
//
// Returns 1-D.
func InvertPermutation(scope *Scope, x tf.Output) (y tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "InvertPermutation",
		Input: []tf.Input{
			x,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

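// A hand-written sketch (not generated code) of InvertPermutation, mirroring
// the example above; the function name is hypothetical. Run in a session,
// the result would be [2, 4, 3, 0, 1].
func exampleInvertPermutationSketch() tf.Output {
	s := NewScope()
	x := Const(s, []int32{3, 4, 0, 2, 1})
	return InvertPermutation(s, x)
}
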
// PreventGradientAttr is an optional argument to PreventGradient.
type PreventGradientAttr func(optionalAttr)

// PreventGradientMessage sets the optional message attribute to value.
//
// value: Will be printed in the error when anyone tries to differentiate
// this operation.
// If not specified, defaults to ""
func PreventGradientMessage(value string) PreventGradientAttr {
	return func(m optionalAttr) {
		m["message"] = value
	}
}

// An identity op that triggers an error if a gradient is requested.
//
// When executed in a graph, this op outputs its input tensor as-is.
//
// When building ops to compute gradients, the TensorFlow gradient system
// will return an error when trying to look up the gradient of this op,
// because no gradient must ever be registered for this function.  This
// op exists to prevent subtle bugs from silently returning unimplemented
// gradients in some corner cases.
//
// Arguments:
//	input: any tensor.
//
// Returns the same input tensor.
func PreventGradient(scope *Scope, input tf.Output, optional ...PreventGradientAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "PreventGradient",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Stops gradient computation.
//
// When executed in a graph, this op outputs its input tensor as-is.
//
// When building ops to compute gradients, this op prevents the contribution of
// its inputs from being taken into account.  Normally, the gradient generator adds
// ops to a graph to compute the derivatives of a specified 'loss' by recursively
// finding out inputs that contributed to its computation.  If you insert this op
// in the graph, its inputs are masked from the gradient generator.  They are not
// taken into account for computing gradients.
//
// This is useful any time you want to compute a value with TensorFlow but need
// to pretend that the value was a constant. For example, the softmax function
// for a vector x can be written as
//
// ```python
//
//   def softmax(x):
//     numerator = tf.exp(x)
//     denominator = tf.reduce_sum(numerator)
//     return numerator / denominator
// ```
//
// This, however, is susceptible to overflow if the values in x are large. A
// more stable alternative is to subtract the maximum of x from each of the
// values.
//
// ```python
//
//   def stable_softmax(x):
//     z = x - tf.reduce_max(x)
//     numerator = tf.exp(z)
//     denominator = tf.reduce_sum(numerator)
//     return numerator / denominator
// ```
//
// However, when we backprop through the softmax to x, we don't want to backprop
// through the `tf.reduce_max(x)` calculation (if the max values are not unique,
// the gradient could flow to the wrong input); we want to treat it as a
// constant. Therefore, we should write this out as
//
// ```python
//
//   def stable_softmax(x):
//     z = x - tf.stop_gradient(tf.reduce_max(x))
//     numerator = tf.exp(z)
//     denominator = tf.reduce_sum(numerator)
//     return numerator / denominator
// ```
//
// Some other examples include:
//
// *  The *EM* algorithm where the *M-step* should not involve backpropagation
//    through the output of the *E-step*.
// *  Contrastive divergence training of Boltzmann machines where, when
//    differentiating the energy function, the training must not backpropagate
//    through the graph that generated the samples from the model.
// *  Adversarial training, where no backprop should happen through the adversarial
//    example generation process.
func StopGradient(scope *Scope, input tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "StopGradient",
		Input: []tf.Input{
			input,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Identity op for gradient debugging.
//
// This op is hidden from public in Python. It is used by TensorFlow Debugger to
// register gradient tensors for gradient debugging.
// This op operates on non-reference-type tensors.
func DebugGradientIdentity(scope *Scope, input tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "DebugGradientIdentity",
		Input: []tf.Input{
			input,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Gather slices from `params` into a Tensor with shape specified by `indices`.
//
// `indices` is a K-dimensional integer tensor, best thought of as a
// (K-1)-dimensional tensor of indices into `params`, where each element defines a
// slice of `params`:
//
//     output[\\(i_0, ..., i_{K-2}\\)] = params[indices[\\(i_0, ..., i_{K-2}\\)]]
//
// Whereas in `tf.gather` `indices` defines slices into the `axis`
// dimension of `params`, in `tf.gather_nd`, `indices` defines slices into the
// first `N` dimensions of `params`, where `N = indices.shape[-1]`.
//
// The last dimension of `indices` can be at most the rank of
// `params`:
//
//     indices.shape[-1] <= params.rank
//
// The last dimension of `indices` corresponds to elements
// (if `indices.shape[-1] == params.rank`) or slices
// (if `indices.shape[-1] < params.rank`) along dimension `indices.shape[-1]`
// of `params`.  The output tensor has shape
//
//     indices.shape[:-1] + params.shape[indices.shape[-1]:]
//
// Note that on CPU, if an out of bound index is found, an error is returned.
// On GPU, if an out of bound index is found, a 0 is stored in the
// corresponding output value.
//
// Some examples follow.
//
// Simple indexing into a matrix:
//
// ```python
//     indices = [[0, 0], [1, 1]]
//     params = [['a', 'b'], ['c', 'd']]
//     output = ['a', 'd']
// ```
//
// Slice indexing into a matrix:
//
// ```python
//     indices = [[1], [0]]
//     params = [['a', 'b'], ['c', 'd']]
//     output = [['c', 'd'], ['a', 'b']]
// ```
//
// Indexing into a 3-tensor:
//
// ```python
//     indices = [[1]]
//     params = [[['a0', 'b0'], ['c0', 'd0']],
//               [['a1', 'b1'], ['c1', 'd1']]]
//     output = [[['a1', 'b1'], ['c1', 'd1']]]
//
//
//     indices = [[0, 1], [1, 0]]
//     params = [[['a0', 'b0'], ['c0', 'd0']],
//               [['a1', 'b1'], ['c1', 'd1']]]
//     output = [['c0', 'd0'], ['a1', 'b1']]
//
//
//     indices = [[0, 0, 1], [1, 0, 1]]
//     params = [[['a0', 'b0'], ['c0', 'd0']],
//               [['a1', 'b1'], ['c1', 'd1']]]
//     output = ['b0', 'b1']
// ```
//
// Batched indexing into a matrix:
//
// ```python
//     indices = [[[0, 0]], [[0, 1]]]
//     params = [['a', 'b'], ['c', 'd']]
//     output = [['a'], ['b']]
// ```
//
// Batched slice indexing into a matrix:
//
// ```python
//     indices = [[[1]], [[0]]]
//     params = [['a', 'b'], ['c', 'd']]
//     output = [[['c', 'd']], [['a', 'b']]]
// ```
//
// Batched indexing into a 3-tensor:
//
// ```python
//     indices = [[[1]], [[0]]]
//     params = [[['a0', 'b0'], ['c0', 'd0']],
//               [['a1', 'b1'], ['c1', 'd1']]]
//     output = [[[['a1', 'b1'], ['c1', 'd1']]],
//               [[['a0', 'b0'], ['c0', 'd0']]]]
//
//     indices = [[[0, 1], [1, 0]], [[0, 0], [1, 1]]]
//     params = [[['a0', 'b0'], ['c0', 'd0']],
//               [['a1', 'b1'], ['c1', 'd1']]]
//     output = [[['c0', 'd0'], ['a1', 'b1']],
//               [['a0', 'b0'], ['c1', 'd1']]]
//
//
//     indices = [[[0, 0, 1], [1, 0, 1]], [[0, 1, 1], [1, 1, 0]]]
//     params = [[['a0', 'b0'], ['c0', 'd0']],
//               [['a1', 'b1'], ['c1', 'd1']]]
//     output = [['b0', 'b1'], ['d0', 'c1']]
// ```
//
// See also `tf.gather` and `tf.batch_gather`.
//
// Arguments:
//	params: The tensor from which to gather values.
//	indices: Index tensor.
//
// Returns Values from `params` gathered from indices given by `indices`, with
// shape `indices.shape[:-1] + params.shape[indices.shape[-1]:]`.
func GatherNd(scope *Scope, params tf.Output, indices tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "GatherNd",
		Input: []tf.Input{
			params, indices,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

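// A hand-written sketch (not generated code) of the "simple indexing into a
// matrix" case above; the function name is hypothetical. Run in a session,
// the result would be the string vector ["a", "d"].
func exampleGatherNdSketch() tf.Output {
	s := NewScope()
	params := Const(s, [][]string{{"a", "b"}, {"c", "d"}})
	indices := Const(s, [][]int32{{0, 0}, {1, 1}})
	return GatherNd(s, params, indices)
}
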
// GatherV2Attr is an optional argument to GatherV2.
type GatherV2Attr func(optionalAttr)

// GatherV2BatchDims sets the optional batch_dims attribute to value.
// If not specified, defaults to 0
func GatherV2BatchDims(value int64) GatherV2Attr {
	return func(m optionalAttr) {
		m["batch_dims"] = value
	}
}

// Gather slices from `params` axis `axis` according to `indices`.
//
// `indices` must be an integer tensor of any dimension (usually 0-D or 1-D).
// Produces an output tensor with shape `params.shape[:axis] +
// indices.shape[batch_dims:] + params.shape[axis + 1:]` where:
//
// ```python
//     # Scalar indices (output is rank(params) - 1).
//     output[a_0, ..., a_n, b_0, ..., b_n] =
//       params[a_0, ..., a_n, indices, b_0, ..., b_n]
//
//     # Vector indices (output is rank(params)).
//     output[a_0, ..., a_n, i, b_0, ..., b_n] =
//       params[a_0, ..., a_n, indices[i], b_0, ..., b_n]
//
//     # Higher rank indices (output is rank(params) + rank(indices) - 1).
//     output[a_0, ..., a_n, i, ..., j, b_0, ... b_n] =
//       params[a_0, ..., a_n, indices[i, ..., j], b_0, ..., b_n]
// ```
//
// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
// <img style="width:100%" src="https://www.tensorflow.org/images/Gather.png" alt>
// </div>
//
// Note that on CPU, if an out of bound index is found, an error is returned.
// On GPU, if an out of bound index is found, a 0 is stored in the
// corresponding output value.
//
// See also `tf.batch_gather` and `tf.gather_nd`.
//
// Arguments:
//	params: The tensor from which to gather values. Must be at least rank
// `axis + 1`.
//	indices: Index tensor. Must be in range `[0, params.shape[axis])`.
//	axis: The axis in `params` to gather `indices` from. Defaults to the first
// dimension. Supports negative indices.
//
// Returns Values from `params` gathered from indices given by `indices`, with
// shape `params.shape[:axis] + indices.shape + params.shape[axis + 1:]`.
func GatherV2(scope *Scope, params tf.Output, indices tf.Output, axis tf.Output, optional ...GatherV2Attr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "GatherV2",
		Input: []tf.Input{
			params, indices, axis,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

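// A hand-written sketch (not generated code) of GatherV2 with vector indices
// along axis 0; the function name is hypothetical. Run in a session, the
// result would be [40, 10, 20].
func exampleGatherV2Sketch() tf.Output {
	s := NewScope()
	params := Const(s, []float32{10, 20, 30, 40})
	indices := Const(s, []int32{3, 0, 1})
	axis := Const(s, int32(0))
	return GatherV2(s, params, indices, axis)
}
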
// Reverses specific dimensions of a tensor.
//
// NOTE `tf.reverse` has now changed behavior in preparation for 1.0.
// `tf.reverse_v2` is currently an alias that will be deprecated before TF 1.0.
//
// Given a `tensor` and an `int32` tensor `axis` representing the set of
// dimensions of `tensor` to reverse, this operation reverses each dimension
// `i` for which there exists `j` s.t. `axis[j] == i`.
//
// `tensor` can have up to 8 dimensions. `axis` may contain 0 or more entries.
// If an index is specified more than once, an InvalidArgument error is raised.
//
// For example:
//
// ```
// # tensor 't' is [[[[ 0,  1,  2,  3],
// #                  [ 4,  5,  6,  7],
// #                  [ 8,  9, 10, 11]],
// #                 [[12, 13, 14, 15],
// #                  [16, 17, 18, 19],
// #                  [20, 21, 22, 23]]]]
// # tensor 't' shape is [1, 2, 3, 4]
//
// # 'dims' is [3] or 'dims' is [-1]
// reverse(t, dims) ==> [[[[ 3,  2,  1,  0],
//                         [ 7,  6,  5,  4],
//                         [11, 10,  9,  8]],
//                        [[15, 14, 13, 12],
//                         [19, 18, 17, 16],
//                         [23, 22, 21, 20]]]]
//
// # 'dims' is '[1]' (or 'dims' is '[-3]')
// reverse(t, dims) ==> [[[[12, 13, 14, 15],
//                         [16, 17, 18, 19],
//                         [20, 21, 22, 23]],
//                        [[ 0,  1,  2,  3],
//                         [ 4,  5,  6,  7],
//                         [ 8,  9, 10, 11]]]]
//
// # 'dims' is '[2]' (or 'dims' is '[-2]')
// reverse(t, dims) ==> [[[[8, 9, 10, 11],
//                         [4, 5, 6, 7],
//                         [0, 1, 2, 3]],
//                        [[20, 21, 22, 23],
//                         [16, 17, 18, 19],
//                         [12, 13, 14, 15]]]]
// ```
//
// Arguments:
//	tensor: Up to 8-D.
//	axis: 1-D. The indices of the dimensions to reverse. Must be in the range
// `[-rank(tensor), rank(tensor))`.
//
// Returns The same shape as `tensor`.
func ReverseV2(scope *Scope, tensor tf.Output, axis tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "ReverseV2",
		Input: []tf.Input{
			tensor, axis,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

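// A hand-written sketch (not generated code) of ReverseV2 reversing the
// columns (dimension 1) of a 2-D tensor; the function name is hypothetical.
// Run in a session, the result would be [[3, 2, 1], [6, 5, 4]].
func exampleReverseV2Sketch() tf.Output {
	s := NewScope()
	t := Const(s, [][]int32{{1, 2, 3}, {4, 5, 6}})
	axis := Const(s, []int32{1})
	return ReverseV2(s, t, axis)
}
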
// Returns the batched diagonal part of a batched tensor.
//
// This operation returns a tensor with the `diagonal` part
// of the batched `input`. The `diagonal` part is computed as follows:
//
// Assume `input` has `k` dimensions `[I, J, K, ..., M, N]`, then the output is a
// tensor of rank `k - 1` with dimensions `[I, J, K, ..., min(M, N)]` where:
//
// `diagonal[i, j, k, ..., n] = input[i, j, k, ..., n, n]`.
//
// The input must be at least a matrix.
//
// For example:
//
// ```
// # 'input' is [[[1, 0, 0, 0]
//                [0, 2, 0, 0]
//                [0, 0, 3, 0]
//                [0, 0, 0, 4]],
//               [[5, 0, 0, 0]
//                [0, 6, 0, 0]
//                [0, 0, 7, 0]
//                [0, 0, 0, 8]]]
//
// and input.shape = (2, 4, 4)
//
// tf.matrix_diag_part(input) ==> [[1, 2, 3, 4], [5, 6, 7, 8]]
//
// which has shape (2, 4)
// ```
//
// Arguments:
//	input: Rank `k` tensor where `k >= 2`.
//
// Returns The extracted diagonal(s) having shape
// `diagonal.shape = input.shape[:-2] + [min(input.shape[-2:])]`.
func MatrixDiagPart(scope *Scope, input tf.Output) (diagonal tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "MatrixDiagPart",
		Input: []tf.Input{
			input,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// MatrixSetDiagV3Attr is an optional argument to MatrixSetDiagV3.
type MatrixSetDiagV3Attr func(optionalAttr)

// MatrixSetDiagV3Align sets the optional align attribute to value.
//
// value: Some diagonals are shorter than `max_diag_len` and need to be padded. `align` is
// a string specifying how superdiagonals and subdiagonals should be aligned,
// respectively. There are four possible alignments: "RIGHT_LEFT" (default),
// "LEFT_RIGHT", "LEFT_LEFT", and "RIGHT_RIGHT". "RIGHT_LEFT" aligns superdiagonals
// to the right (left-pads the row) and subdiagonals to the left (right-pads the
// row). It is the packing format LAPACK uses. cuSPARSE uses "LEFT_RIGHT", which is
// the opposite alignment.
// If not specified, defaults to "RIGHT_LEFT"
func MatrixSetDiagV3Align(value string) MatrixSetDiagV3Attr {
	return func(m optionalAttr) {
		m["align"] = value
	}
}

// Returns a batched matrix tensor with new batched diagonal values.
//
// Given `input` and `diagonal`, this operation returns a tensor with the
// same shape and values as `input`, except for the specified diagonals of the
// innermost matrices. These will be overwritten by the values in `diagonal`.
//
// `input` has `r+1` dimensions `[I, J, ..., L, M, N]`. When `k` is scalar or
// `k[0] == k[1]`, `diagonal` has `r` dimensions `[I, J, ..., L, max_diag_len]`.
// Otherwise, it has `r+1` dimensions `[I, J, ..., L, num_diags, max_diag_len]`.
// `num_diags` is the number of diagonals, `num_diags = k[1] - k[0] + 1`.
// `max_diag_len` is the longest diagonal in the range `[k[0], k[1]]`,
// `max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))`
//
// The output is a tensor of rank `r+1` with dimensions `[I, J, ..., L, M, N]`.
// If `k` is scalar or `k[0] == k[1]`:
//
// ```
// output[i, j, ..., l, m, n]
//   = diagonal[i, j, ..., l, n-max(k[1], 0)] ; if n - m == k[1]
//     input[i, j, ..., l, m, n]              ; otherwise
// ```
//
// Otherwise,
//
// ```
// output[i, j, ..., l, m, n]
//   = diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= k[1]
//     input[i, j, ..., l, m, n]                         ; otherwise
// ```
// where `d = n - m`, `diag_index = k[1] - d`, and
// `index_in_diag = n - max(d, 0) + offset`.
//
// `offset` is zero except when the alignment of the diagonal is to the right.
// ```
// offset = max_diag_len - diag_len(d) ; if (`align` in {RIGHT_LEFT, RIGHT_RIGHT}
//                                            and `d >= 0`) or
//                                          (`align` in {LEFT_RIGHT, RIGHT_RIGHT}
//                                            and `d <= 0`)
//          0                          ; otherwise
// ```
// where `diag_len(d) = min(cols - max(d, 0), rows + min(d, 0))`.
//
// For example:
//
// ```
// # The main diagonal.
// input = np.array([[[7, 7, 7, 7],              # Input shape: (2, 3, 4)
//                    [7, 7, 7, 7],
//                    [7, 7, 7, 7]],
//                   [[7, 7, 7, 7],
//                    [7, 7, 7, 7],
//                    [7, 7, 7, 7]]])
// diagonal = np.array([[1, 2, 3],               # Diagonal shape: (2, 3)
//                      [4, 5, 6]])
// tf.matrix_set_diag(input, diagonal)
//   ==> [[[1, 7, 7, 7],  # Output shape: (2, 3, 4)
//         [7, 2, 7, 7],
//         [7, 7, 3, 7]],
//        [[4, 7, 7, 7],
//         [7, 5, 7, 7],
//         [7, 7, 6, 7]]]
//
// # A superdiagonal (per batch).
// tf.matrix_set_diag(input, diagonal, k = 1)
//   ==> [[[7, 1, 7, 7],  # Output shape: (2, 3, 4)
//         [7, 7, 2, 7],
//         [7, 7, 7, 3]],
//        [[7, 4, 7, 7],
//         [7, 7, 5, 7],
//         [7, 7, 7, 6]]]
//
// # A band of diagonals.
// diagonals = np.array([[[0, 9, 1],  # Diagonal shape: (2, 4, 3)
//                        [6, 5, 8],
//                        [1, 2, 3],
//                        [4, 5, 0]],
//                       [[0, 1, 2],
//                        [5, 6, 4],
//                        [6, 1, 2],
//                        [3, 4, 0]]])
// tf.matrix_set_diag(input, diagonals, k = (-1, 2))
//   ==> [[[1, 6, 9, 7],  # Output shape: (2, 3, 4)
//         [4, 2, 5, 1],
//         [7, 5, 3, 8]],
//        [[6, 5, 1, 7],
//         [3, 1, 6, 2],
//         [7, 4, 2, 4]]]
//
// # LEFT_RIGHT alignment.
// diagonals = np.array([[[9, 1, 0],  # Diagonal shape: (2, 4, 3)
//                        [6, 5, 8],
//                        [1, 2, 3],
//                        [0, 4, 5]],
//                       [[1, 2, 0],
//                        [5, 6, 4],
//                        [6, 1, 2],
//                        [0, 3, 4]]])
// tf.matrix_set_diag(input, diagonals, k = (-1, 2), align="LEFT_RIGHT")
//   ==> [[[1, 6, 9, 7],  # Output shape: (2, 3, 4)
//         [4, 2, 5, 1],
//         [7, 5, 3, 8]],
//        [[6, 5, 1, 7],
//         [3, 1, 6, 2],
//         [7, 4, 2, 4]]]
//
// ```
//
// Arguments:
//	input: Rank `r+1`, where `r >= 1`.
//	diagonal: Rank `r` when `k` is an integer or `k[0] == k[1]`. Otherwise, it has rank `r+1`.
// `k >= 1`.
//	k: Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main
// diagonal, and negative value means subdiagonals. `k` can be a single integer
// (for a single diagonal) or a pair of integers specifying the low and high ends
// of a matrix band. `k[0]` must not be larger than `k[1]`.
//
// Returns Rank `r+1`, with `output.shape = input.shape`.
func MatrixSetDiagV3(scope *Scope, input tf.Output, diagonal tf.Output, k tf.Output, optional ...MatrixSetDiagV3Attr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "MatrixSetDiagV3",
		Input: []tf.Input{
			input, diagonal, k,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Returns a batched matrix tensor with new batched diagonal values.
//
// Given `input` and `diagonal`, this operation returns a tensor with the
// same shape and values as `input`, except for the specified diagonals of the
// innermost matrices. These will be overwritten by the values in `diagonal`.
//
// `input` has `r+1` dimensions `[I, J, ..., L, M, N]`. When `k` is scalar or
// `k[0] == k[1]`, `diagonal` has `r` dimensions `[I, J, ..., L, max_diag_len]`.
// Otherwise, it has `r+1` dimensions `[I, J, ..., L, num_diags, max_diag_len]`.
// `num_diags` is the number of diagonals, `num_diags = k[1] - k[0] + 1`.
// `max_diag_len` is the longest diagonal in the range `[k[0], k[1]]`,
// `max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))`
//
// The output is a tensor of rank `r+1` with dimensions `[I, J, ..., L, M, N]`.
// If `k` is scalar or `k[0] == k[1]`:
//
// ```
// output[i, j, ..., l, m, n]
//   = diagonal[i, j, ..., l, n-max(k[1], 0)] ; if n - m == k[1]
//     input[i, j, ..., l, m, n]              ; otherwise
// ```
//
// Otherwise,
//
// ```
// output[i, j, ..., l, m, n]
//   = diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= k[1]
//     input[i, j, ..., l, m, n]                         ; otherwise
// ```
// where `d = n - m`, `diag_index = k[1] - d`, and `index_in_diag = n - max(d, 0)`.
//
// For example:
//
// ```
// # The main diagonal.
// input = np.array([[[7, 7, 7, 7],              # Input shape: (2, 3, 4)
//                    [7, 7, 7, 7],
//                    [7, 7, 7, 7]],
//                   [[7, 7, 7, 7],
//                    [7, 7, 7, 7],
//                    [7, 7, 7, 7]]])
// diagonal = np.array([[1, 2, 3],               # Diagonal shape: (2, 3)
//                      [4, 5, 6]])
// tf.matrix_set_diag(input, diagonal)
//   ==> [[[1, 7, 7, 7],  # Output shape: (2, 3, 4)
//         [7, 2, 7, 7],
//         [7, 7, 3, 7]],
//        [[4, 7, 7, 7],
//         [7, 5, 7, 7],
//         [7, 7, 6, 7]]]
//
// # A superdiagonal (per batch).
// tf.matrix_set_diag(input, diagonal, k = 1)
//   ==> [[[7, 1, 7, 7],  # Output shape: (2, 3, 4)
//         [7, 7, 2, 7],
//         [7, 7, 7, 3]],
//        [[7, 4, 7, 7],
//         [7, 7, 5, 7],
//         [7, 7, 7, 6]]]
//
// # A band of diagonals.
// diagonals = np.array([[[1, 2, 3],  # Diagonal shape: (2, 2, 3)
//                        [4, 5, 0]],
//                       [[6, 1, 2],
//                        [3, 4, 0]]])
// tf.matrix_set_diag(input, diagonals, k = (-1, 0))
//   ==> [[[1, 7, 7, 7],  # Output shape: (2, 3, 4)
//         [4, 2, 7, 7],
//         [0, 5, 3, 7]],
//        [[6, 7, 7, 7],
//         [3, 1, 7, 7],
//         [7, 4, 2, 7]]]
//
// ```
//
// Arguments:
//	input: Rank `r+1`, where `r >= 1`.
//	diagonal: Rank `r` when `k` is an integer or `k[0] == k[1]`. Otherwise, it has rank `r+1`.
// `k >= 1`.
//	k: Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main
// diagonal, and negative value means subdiagonals. `k` can be a single integer
// (for a single diagonal) or a pair of integers specifying the low and high ends
// of a matrix band. `k[0]` must not be larger than `k[1]`.
//
// Returns Rank `r+1`, with `output.shape = input.shape`.
func MatrixSetDiagV2(scope *Scope, input tf.Output, diagonal tf.Output, k tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "MatrixSetDiagV2",
		Input: []tf.Input{
			input, diagonal, k,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Returns a diagonal tensor with given diagonal values.
//
// Given a `diagonal`, this operation returns a tensor with the `diagonal` and
// everything else padded with zeros. The diagonal is computed as follows:
//
// Assume `diagonal` has dimensions [D1,..., Dk], then the output is a tensor of
// rank 2k with dimensions [D1,..., Dk, D1,..., Dk] where:
//
// `output[i1,..., ik, i1,..., ik] = diagonal[i1, ..., ik]` and 0 everywhere else.
//
// For example:
//
// ```
// # 'diagonal' is [1, 2, 3, 4]
// tf.diag(diagonal) ==> [[1, 0, 0, 0]
//                        [0, 2, 0, 0]
//                        [0, 0, 3, 0]
//                        [0, 0, 0, 4]]
// ```
//
// Arguments:
//	diagonal: Rank k tensor where k is at most 1.
func Diag(scope *Scope, diagonal tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Diag",
		Input: []tf.Input{
			diagonal,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Returns a tensor of ones with the same shape and type as x.
//
// Arguments:
//	x: a tensor of type T.
//
// Returns a tensor of the same shape and type as x but filled with ones.
func OnesLike(scope *Scope, x tf.Output) (y tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "OnesLike",
		Input: []tf.Input{
			x,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Returns a constant tensor on the host. Only for writing C++ tests.
//
// Arguments:
//	value: Attr `value` is the tensor to return.
//
func HostConst(scope *Scope, value tf.Tensor, dtype tf.DataType) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"value": value, "dtype": dtype}
	opspec := tf.OpSpec{
		Type: "HostConst",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Splits a tensor into `num_split` tensors along one dimension.
//
// Arguments:
//	axis: 0-D.  The dimension along which to split.  Must be in the range
// `[-rank(value), rank(value))`.
//	value: The tensor to split.
//	num_split: The number of ways to split.  Must evenly divide
// `value.shape[axis]`.
//
// Returns They are identically shaped tensors, whose shape matches that of `value`
// except along `axis`, where their sizes are
// `value.shape[axis] / num_split`.
func Split(scope *Scope, axis tf.Output, value tf.Output, num_split int64) (output []tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_split": num_split}
	opspec := tf.OpSpec{
		Type: "Split",
		Input: []tf.Input{
			axis, value,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	if scope.Err() != nil {
		return
	}
	var idx int
	var err error
	if output, idx, err = makeOutputList(op, idx, "output"); err != nil {
		scope.UpdateErr("Split", err)
		return
	}
	return output
}

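// A hand-written sketch (not generated code) of Split dividing a 6-element
// vector into three equal pieces along axis 0; the function name is
// hypothetical. Each of the three returned tensors would have shape [2].
func exampleSplitSketch() []tf.Output {
	s := NewScope()
	axis := Const(s, int32(0))
	value := Const(s, []float32{1, 2, 3, 4, 5, 6})
	return Split(s, axis, value, 3)
}
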
// Computes offsets of concat inputs within its output.
//
// For example:
//
// ```
// # 'x' is [2, 2, 7]
// # 'y' is [2, 3, 7]
// # 'z' is [2, 5, 7]
// concat_offset(2, [x, y, z]) => [0, 0, 0], [0, 2, 0], [0, 5, 0]
// ```
//
// This is typically used by gradient computations for a concat operation.
//
// Arguments:
//	concat_dim: The dimension along which to concatenate.
//	shape: The `N` int32 vectors representing the shapes of the tensors being concatenated.
//
// Returns The `N` int32 vectors representing the starting offset
// of input tensors within the concatenated output.
func ConcatOffset(scope *Scope, concat_dim tf.Output, shape []tf.Output) (offset []tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "ConcatOffset",
		Input: []tf.Input{
			concat_dim, tf.OutputList(shape),
		},
	}
	op := scope.AddOperation(opspec)
	if scope.Err() != nil {
		return
	}
	var idx int
	var err error
	if offset, idx, err = makeOutputList(op, idx, "offset"); err != nil {
		scope.UpdateErr("ConcatOffset", err)
		return
	}
	return offset
}

// Converts an array of flat indices into a tuple of coordinate arrays.
//
// Example:
//
// ```
// y = tf.unravel_index(indices=[2, 5, 7], dims=[3, 3])
// # 'dims' represent a hypothetical (3, 3) tensor of indices:
// # [[0, 1, *2*],
// #  [3, 4, *5*],
// #  [6, *7*, 8]]
// # For each entry from 'indices', this operation returns
// # its coordinates (marked with '*'), such as
// # 2 ==> (0, 2)
// # 5 ==> (1, 2)
// # 7 ==> (2, 1)
// y ==> [[0, 1, 2], [2, 2, 1]]
// ```
//
// @compatibility(numpy)
// Equivalent to np.unravel_index
// @end_compatibility
//
// Arguments:
//	indices: A 0-D or 1-D `int` Tensor whose elements are indices into the
// flattened version of an array of dimensions dims.
//	dims: A 1-D `int` Tensor. The shape of the array to use for unraveling
// indices.
//
// Returns A 2-D (or 1-D if indices is 0-D) tensor where each row has the
// same shape as the indices array.
func UnravelIndex(scope *Scope, indices tf.Output, dims tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "UnravelIndex",
		Input: []tf.Input{
			indices, dims,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// EmptyAttr is an optional argument to Empty.
type EmptyAttr func(optionalAttr)

// EmptyInit sets the optional init attribute to value.
//
// value: If True, initialize the returned tensor with the default value of dtype.  Otherwise, the implementation is free not to initialize the tensor's content.
// If not specified, defaults to false
func EmptyInit(value bool) EmptyAttr {
	return func(m optionalAttr) {
		m["init"] = value
	}
}

// Creates a tensor with the given shape.
//
// This operation creates a tensor of `shape` and `dtype`.
//
// Arguments:
//	shape: 1-D. Represents the shape of the output tensor.
//
// Returns A `Tensor` of type `T`.
func Empty(scope *Scope, shape tf.Output, dtype tf.DataType, optional ...EmptyAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"dtype": dtype}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "Empty",
		Input: []tf.Input{
			shape,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Subtracts `v` from specified rows of `x`.
//
// Computes y = x; y[i, :] -= v; return y.
//
// Arguments:
//	x: A `Tensor` of type T.
//	i: A vector. Indices into the left-most dimension of `x`.
//	v: A `Tensor` of type T. Same dimension sizes as x except the first dimension, which must be the same as i's size.
//
// Returns A `Tensor` of type T. An alias of `x`. The content of `y` is undefined if there are duplicates in `i`.
func InplaceSub(scope *Scope, x tf.Output, i tf.Output, v tf.Output) (y tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "InplaceSub",
		Input: []tf.Input{
			x, i, v,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// PackAttr is an optional argument to Pack.
type PackAttr func(optionalAttr)

// PackAxis sets the optional axis attribute to value.
//
// value: Dimension along which to pack.  Negative values wrap around, so the
// valid range is `[-(R+1), R+1)`.
// If not specified, defaults to 0
func PackAxis(value int64) PackAttr {
	return func(m optionalAttr) {
		m["axis"] = value
	}
}

// Packs a list of `N` rank-`R` tensors into one rank-`(R+1)` tensor.
//
// Packs the `N` tensors in `values` into a tensor with rank one higher than each
// tensor in `values`, by packing them along the `axis` dimension.
// Given a list of tensors of shape `(A, B, C)`;
//
// if `axis == 0` then the `output` tensor will have the shape `(N, A, B, C)`.
// if `axis == 1` then the `output` tensor will have the shape `(A, N, B, C)`.
// Etc.
//
// For example:
//
// ```
// # 'x' is [1, 4]
// # 'y' is [2, 5]
// # 'z' is [3, 6]
// pack([x, y, z]) => [[1, 4], [2, 5], [3, 6]]  # Pack along first dim.
// pack([x, y, z], axis=1) => [[1, 2, 3], [4, 5, 6]]
// ```
//
// This is the opposite of `unpack`.
//
// Arguments:
//	values: Must be of same shape and type.
//
// Returns The packed tensor.
func Pack(scope *Scope, values []tf.Output, optional ...PackAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "Pack",
		Input: []tf.Input{
			tf.OutputList(values),
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

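// A hand-written sketch (not generated code) mirroring the pack example
// above; the function name is hypothetical. Run in a session, the default
// axis would give [[1, 4], [2, 5], [3, 6]].
func examplePackSketch() tf.Output {
	s := NewScope()
	x := Const(s, []int32{1, 4})
	y := Const(s, []int32{2, 5})
	z := Const(s, []int32{3, 6})
	return Pack(s, []tf.Output{x, y, z})
}
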
// MfccAttr is an optional argument to Mfcc.
type MfccAttr func(optionalAttr)

// MfccUpperFrequencyLimit sets the optional upper_frequency_limit attribute to value.
//
// value: The highest frequency to use when calculating the
// cepstrum.
// If not specified, defaults to 4000
func MfccUpperFrequencyLimit(value float32) MfccAttr {
	return func(m optionalAttr) {
		m["upper_frequency_limit"] = value
	}
}

// MfccLowerFrequencyLimit sets the optional lower_frequency_limit attribute to value.
//
// value: The lowest frequency to use when calculating the
// cepstrum.
// If not specified, defaults to 20
func MfccLowerFrequencyLimit(value float32) MfccAttr {
	return func(m optionalAttr) {
		m["lower_frequency_limit"] = value
	}
}

// MfccFilterbankChannelCount sets the optional filterbank_channel_count attribute to value.
//
// value: Resolution of the Mel bank used internally.
// If not specified, defaults to 40
func MfccFilterbankChannelCount(value int64) MfccAttr {
	return func(m optionalAttr) {
		m["filterbank_channel_count"] = value
	}
}

// MfccDctCoefficientCount sets the optional dct_coefficient_count attribute to value.
//
// value: How many output channels to produce per time slice.
// If not specified, defaults to 13
func MfccDctCoefficientCount(value int64) MfccAttr {
	return func(m optionalAttr) {
		m["dct_coefficient_count"] = value
	}
}

// Transforms a spectrogram into a form that's useful for speech recognition.
//
// Mel Frequency Cepstral Coefficients are a way of representing audio data that's
// been effective as an input feature for machine learning. They are created by
// taking the spectrum of a spectrogram (a 'cepstrum'), and discarding some of the
// higher frequencies that are less significant to the human ear. They have a long
// history in the speech recognition world, and https://en.wikipedia.org/wiki/Mel-frequency_cepstrum
// is a good resource to learn more.
//
// Arguments:
//	spectrogram: Typically produced by the Spectrogram op, with magnitude_squared
// set to true.
//	sample_rate: The sample rate of the source audio, in samples per second.
func Mfcc(scope *Scope, spectrogram tf.Output, sample_rate tf.Output, optional ...MfccAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "Mfcc",
		Input: []tf.Input{
			spectrogram, sample_rate,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// AudioSpectrogramAttr is an optional argument to AudioSpectrogram.
type AudioSpectrogramAttr func(optionalAttr)

// AudioSpectrogramMagnitudeSquared sets the optional magnitude_squared attribute to value.
//
// value: Whether to return the squared magnitude or just the
// magnitude. Using squared magnitude can avoid extra calculations.
// If not specified, defaults to false
func AudioSpectrogramMagnitudeSquared(value bool) AudioSpectrogramAttr {
	return func(m optionalAttr) {
		m["magnitude_squared"] = value
	}
}

// Produces a visualization of audio data over time.
//
// Spectrograms are a standard way of representing audio information as a series of
// slices of frequency information, one slice for each window of time. By joining
// these together into a sequence, they form a distinctive fingerprint of the sound
// over time.
//
// This op expects to receive audio data as an input, stored as floats in the range
// -1 to 1, together with a window width in samples, and a stride specifying how
// far to move the window between slices. From this it generates a three
// dimensional output. The first dimension is for the channels in the input, so a
// stereo audio input, for example, would have two here. The second dimension is
// time, with successive frequency slices. The third dimension has an amplitude
// value for each frequency during that time slice.
//
// This means the layout when converted and saved as an image is rotated 90 degrees
// clockwise from a typical spectrogram. Time is descending down the Y axis, and
// the frequency decreases from left to right.
//
// Each value in the result represents the square root of the sum of the squares of
// the real and imaginary parts of an FFT on the current window of samples. In this
// way, the lowest dimension represents the power of each frequency in the current
// window, and adjacent windows are concatenated in the next dimension.
//
// To get a more intuitive and visual look at what this operation does, you can run
// tensorflow/examples/wav_to_spectrogram to read in an audio file and save out the
// resulting spectrogram as a PNG image.
//
// Arguments:
//	input: Float representation of audio data.
//	window_size: How wide the input window is in samples. For the highest efficiency
// this should be a power of two, but other values are accepted.
//	stride: How widely apart the center of adjacent sample windows should be.
//
// Returns 3D representation of the audio frequencies as an image.
func AudioSpectrogram(scope *Scope, input tf.Output, window_size int64, stride int64, optional ...AudioSpectrogramAttr) (spectrogram tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"window_size": window_size, "stride": stride}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "AudioSpectrogram",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// DecodeWavAttr is an optional argument to DecodeWav.
type DecodeWavAttr func(optionalAttr)

// DecodeWavDesiredChannels sets the optional desired_channels attribute to value.
//
// value: Number of sample channels wanted.
// If not specified, defaults to -1
func DecodeWavDesiredChannels(value int64) DecodeWavAttr {
	return func(m optionalAttr) {
		m["desired_channels"] = value
	}
}

// DecodeWavDesiredSamples sets the optional desired_samples attribute to value.
//
// value: Length of audio requested.
// If not specified, defaults to -1
func DecodeWavDesiredSamples(value int64) DecodeWavAttr {
	return func(m optionalAttr) {
		m["desired_samples"] = value
	}
}

// Decode a 16-bit PCM WAV file to a float tensor.
//
// The -32768 to 32767 signed 16-bit values will be scaled to -1.0 to 1.0 in float.
//
// When desired_channels is set, if the input contains fewer channels than this,
// the last channel will be duplicated to give the requested number; if the input
// has more channels than requested, the additional channels will be ignored.
//
// If desired_samples is set, then the audio will be cropped or padded with zeroes
// to the requested length.
//
// The first output contains a Tensor with the content of the audio samples. The
// lowest dimension will be the number of channels, and the second will be the
// number of samples. For example, a ten-sample-long stereo WAV file should give an
// output shape of [10, 2].
//
// Arguments:
//	contents: The WAV-encoded audio, usually from a file.
//
// Returns:
//	audio: 2-D with shape `[length, channels]`.
//	sample_rate: Scalar holding the sample rate found in the WAV header.
func DecodeWav(scope *Scope, contents tf.Output, optional ...DecodeWavAttr) (audio tf.Output, sample_rate tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "DecodeWav",
		Input: []tf.Input{
			contents,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1)
}

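// A hand-written sketch (not generated code) of DecodeWav fed from a scalar
// string placeholder holding WAV bytes; the function name and the
// placeholder-feeding approach are assumptions, and tf.ScalarShape comes
// from the tensorflow/go package.
func exampleDecodeWavSketch() (audio, sampleRate tf.Output) {
	s := NewScope()
	contents := Placeholder(s, tf.String, PlaceholderShape(tf.ScalarShape()))
	// Request mono output regardless of the source channel count.
	return DecodeWav(s, contents, DecodeWavDesiredChannels(1))
}
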
3188// UnbatchGradAttr is an optional argument to UnbatchGrad.
3189type UnbatchGradAttr func(optionalAttr)
3190
3191// UnbatchGradContainer sets the optional container attribute to value.
3192// If not specified, defaults to ""
3193func UnbatchGradContainer(value string) UnbatchGradAttr {
3194	return func(m optionalAttr) {
3195		m["container"] = value
3196	}
3197}
3198
3199// UnbatchGradSharedName sets the optional shared_name attribute to value.
3200// If not specified, defaults to ""
3201func UnbatchGradSharedName(value string) UnbatchGradAttr {
3202	return func(m optionalAttr) {
3203		m["shared_name"] = value
3204	}
3205}
3206
3207// Gradient of Unbatch.
3208//
3209// Acts like Batch but using the given batch_index index of batching things as they
3210// become available. This ensures that the gradients are propagated back in the
3211// same session which did the forward pass.
3212//
3213// original_input: The input to the Unbatch operation this is the gradient of.
3214// batch_index: The batch_index given to the Unbatch operation this is the gradient
3215// of.
3216// grad: The downstream gradient.
3217// id: The id scalar emitted by Batch.
3218// batched_grad: The return value, either an empty tensor or the batched gradient.
3219// container: Container to control resource sharing.
3220// shared_name: Instances of UnbatchGrad with the same container and shared_name
3221//  are assumed to possibly belong to the same batch. If left empty, the op name
3222//  will be used as the shared name.
3223func UnbatchGrad(scope *Scope, original_input tf.Output, batch_index tf.Output, grad tf.Output, id tf.Output, optional ...UnbatchGradAttr) (batched_grad tf.Output) {
3224	if scope.Err() != nil {
3225		return
3226	}
3227	attrs := map[string]interface{}{}
3228	for _, a := range optional {
3229		a(attrs)
3230	}
3231	opspec := tf.OpSpec{
3232		Type: "UnbatchGrad",
3233		Input: []tf.Input{
3234			original_input, batch_index, grad, id,
3235		},
3236		Attrs: attrs,
3237	}
3238	op := scope.AddOperation(opspec)
3239	return op.Output(0)
3240}

// Computes element-wise population count (a.k.a. popcount, bitsum, bitcount).
//
// For each entry in `x`, calculates the number of `1` (on) bits in the binary
// representation of that entry.
//
// **NOTE**: It is more efficient to first `tf.bitcast` your tensors into
// `int32` or `int64` and perform the bitcount on the result, than to feed in
// 8- or 16-bit inputs and then aggregate the resulting counts.
func PopulationCount(scope *Scope, x tf.Output) (y tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "PopulationCount",
		Input: []tf.Input{
			x,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
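
// A minimal sketch (editorial; the input values are assumptions) of counting
// set bits with PopulationCount:
//
//	s := NewScope()
//	x := Const(s, []int32{0, 1, 7, 255})
//	y := PopulationCount(s, x) // evaluates to [0, 1, 3, 8] as uint8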

// Bucketize each feature based on bucket boundaries.
//
// An op that returns a list of integer tensors, where each tensor represents the
// bucketized values for a single feature.
//
// Arguments:
//	float_values: float; List of Rank 1 Tensors, each containing float values for a single feature.
//	bucket_boundaries: float; List of Rank 1 Tensors, each containing the bucket boundaries for a single
// feature.
//
// Returns int; List of Rank 1 Tensors, each containing the bucketized values for a single feature.
func BoostedTreesBucketize(scope *Scope, float_values []tf.Output, bucket_boundaries []tf.Output) (buckets []tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "BoostedTreesBucketize",
		Input: []tf.Input{
			tf.OutputList(float_values), tf.OutputList(bucket_boundaries),
		},
	}
	op := scope.AddOperation(opspec)
	if scope.Err() != nil {
		return
	}
	var idx int
	var err error
	if buckets, idx, err = makeOutputList(op, idx, "buckets"); err != nil {
		scope.UpdateErr("BoostedTreesBucketize", err)
		return
	}
	return buckets
}
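
// A minimal sketch (editorial; values are assumptions) of bucketizing two
// features against per-feature boundaries:
//
//	s := NewScope()
//	f0 := Const(s, []float32{-5, 10, 13})
//	f1 := Const(s, []float32{2, 12, 0})
//	b0 := Const(s, []float32{0, 5, 10}) // boundaries for feature 0
//	b1 := Const(s, []float32{0, 5, 10}) // boundaries for feature 1
//	buckets := BoostedTreesBucketize(s, []tf.Output{f0, f1}, []tf.Output{b0, b1})
//	_ = buckets // one int32 tensor of bucket ids per feature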

// BoostedTreesCreateQuantileStreamResourceAttr is an optional argument to BoostedTreesCreateQuantileStreamResource.
type BoostedTreesCreateQuantileStreamResourceAttr func(optionalAttr)

// BoostedTreesCreateQuantileStreamResourceMaxElements sets the optional max_elements attribute to value.
//
// value: int; The maximum number of data points that can be fed to the stream.
// If not specified, defaults to 1099511627776
func BoostedTreesCreateQuantileStreamResourceMaxElements(value int64) BoostedTreesCreateQuantileStreamResourceAttr {
	return func(m optionalAttr) {
		m["max_elements"] = value
	}
}

// Create the Resource for Quantile Streams.
//
// Arguments:
//	quantile_stream_resource_handle: resource; Handle to quantile stream resource.
//	epsilon: float; The required approximation error of the stream resource.
//	num_streams: int; The number of streams managed by the resource that shares the same epsilon.
//
// Returns the created operation.
func BoostedTreesCreateQuantileStreamResource(scope *Scope, quantile_stream_resource_handle tf.Output, epsilon tf.Output, num_streams tf.Output, optional ...BoostedTreesCreateQuantileStreamResourceAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "BoostedTreesCreateQuantileStreamResource",
		Input: []tf.Input{
			quantile_stream_resource_handle, epsilon, num_streams,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}
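
// A minimal sketch (editorial; it assumes the generated
// BoostedTreesQuantileStreamResourceHandleOp wrapper is available in this
// package) of creating a stream resource for two features:
//
//	s := NewScope()
//	h := BoostedTreesQuantileStreamResourceHandleOp(s)
//	create := BoostedTreesCreateQuantileStreamResource(s,
//		h, Const(s, float32(0.01)), Const(s, int64(2)),
//		BoostedTreesCreateQuantileStreamResourceMaxElements(1<<20))
//	_ = create // run this operation once before feeding summaries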

// Updates the tree ensemble by either adding a layer to the last tree being grown
//
// or by starting a new tree.
//
// Arguments:
//	tree_ensemble_handle: Handle to the ensemble variable.
//	feature_ids: Rank 1 tensor with ids for each feature. This is the real id of
// the feature that will be used in the split.
//	node_ids: List of rank 1 tensors representing the nodes for which this feature
// has a split.
//	gains: List of rank 1 tensors representing the gains for each of the feature's
// split.
//	thresholds: List of rank 1 tensors representing the thresholds for each of the
// feature's split.
//	left_node_contribs: List of rank 2 tensors with left leaf contribs for each of
// the feature's splits. Will be added to the previous node values to constitute
// the values of the left nodes.
//	right_node_contribs: List of rank 2 tensors with right leaf contribs for each
// of the feature's splits. Will be added to the previous node values to constitute
// the values of the right nodes.
//	max_depth: Max depth of the tree to build.
//	learning_rate: shrinkage constant for each new tree.
//	pruning_mode: 0-No pruning, 1-Pre-pruning, 2-Post-pruning.
//
// Returns the created operation.
func BoostedTreesUpdateEnsemble(scope *Scope, tree_ensemble_handle tf.Output, feature_ids tf.Output, node_ids []tf.Output, gains []tf.Output, thresholds []tf.Output, left_node_contribs []tf.Output, right_node_contribs []tf.Output, max_depth tf.Output, learning_rate tf.Output, pruning_mode int64) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"pruning_mode": pruning_mode}
	opspec := tf.OpSpec{
		Type: "BoostedTreesUpdateEnsemble",
		Input: []tf.Input{
			tree_ensemble_handle, feature_ids, tf.OutputList(node_ids), tf.OutputList(gains), tf.OutputList(thresholds), tf.OutputList(left_node_contribs), tf.OutputList(right_node_contribs), max_depth, learning_rate,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}

// Runs multiple additive regression ensemble predictors on input instances and
//
// computes the update to cached logits. It is designed to be used during training.
// It traverses the trees starting from cached tree id and cached node id and
// calculates the updates to be pushed to the cache.
//
// Arguments:
//
//	cached_tree_ids: Rank 1 Tensor containing cached tree ids, which is the starting
// tree of prediction.
//	cached_node_ids: Rank 1 Tensor containing cached node ids, which is the starting
// node of prediction.
//	bucketized_features: A list of rank 1 Tensors containing bucket id for each
// feature.
//	logits_dimension: scalar, dimension of the logits, to be used for partial logits
// shape.
//
// Returns:
//	partial_logits: Rank 2 Tensor containing logits update (with respect to cached
// values stored) for each example.
//	tree_ids: Rank 1 Tensor containing new tree ids for each example.
//	node_ids: Rank 1 Tensor containing new node ids in the new tree_ids.
func BoostedTreesTrainingPredict(scope *Scope, tree_ensemble_handle tf.Output, cached_tree_ids tf.Output, cached_node_ids tf.Output, bucketized_features []tf.Output, logits_dimension int64) (partial_logits tf.Output, tree_ids tf.Output, node_ids tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"logits_dimension": logits_dimension}
	opspec := tf.OpSpec{
		Type: "BoostedTreesTrainingPredict",
		Input: []tf.Input{
			tree_ensemble_handle, cached_tree_ids, cached_node_ids, tf.OutputList(bucketized_features),
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}

// Aggregates the summary of accumulated stats for the batch.
//
// The summary stats contains gradients and hessians accumulated for each node, feature dimension id and bucket.
//
// Arguments:
//	node_ids: int32; Rank 1 Tensor containing node ids for each example, shape [batch_size].
//	gradients: float32; Rank 2 Tensor (shape=[batch_size, logits_dimension]) with gradients for each example.
//	hessians: float32; Rank 2 Tensor (shape=[batch_size, hessian_dimension]) with hessians for each example.
//	feature: int32; Rank 2 feature Tensors (shape=[batch_size, feature_dimension]).
//	max_splits: int; the maximum number of splits possible in the whole tree.
//	num_buckets: int; equal to the maximum possible value of the bucketized feature.
//
// Returns output Rank 4 Tensor (shape=[splits, feature_dimension, buckets, logits_dimension + hessian_dimension])
// containing accumulated stats for each node, feature dimension and bucket.
func BoostedTreesAggregateStats(scope *Scope, node_ids tf.Output, gradients tf.Output, hessians tf.Output, feature tf.Output, max_splits int64, num_buckets int64) (stats_summary tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"max_splits": max_splits, "num_buckets": num_buckets}
	opspec := tf.OpSpec{
		Type: "BoostedTreesAggregateStats",
		Input: []tf.Input{
			node_ids, gradients, hessians, feature,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
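
// A minimal sketch (editorial; shapes and values are assumptions) for a batch
// of three examples with scalar logits (logits_dimension == 1):
//
//	s := NewScope()
//	nodeIds := Const(s, []int32{0, 0, 1})               // [batch_size]
//	grads := Const(s, [][]float32{{0.1}, {0.2}, {0.3}}) // [batch_size, 1]
//	hess := Const(s, [][]float32{{1}, {1}, {1}})        // [batch_size, 1]
//	feat := Const(s, [][]int32{{0}, {2}, {1}})          // [batch_size, 1]
//	summary := BoostedTreesAggregateStats(s, nodeIds, grads, hess, feat, 4, 3)
//	_ = summary // rank 4: [splits, feature_dimension, buckets, logits+hessian dims]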

// Makes the summary of accumulated stats for the batch.
//
// The summary stats contains gradients and hessians accumulated into the corresponding node and bucket for each example.
//
// Arguments:
//	node_ids: int32 Rank 1 Tensor containing node ids, which each example falls into for the requested layer.
//	gradients: float32; Rank 2 Tensor (shape=[#examples, 1]) for gradients.
//	hessians: float32; Rank 2 Tensor (shape=[#examples, 1]) for hessians.
//	bucketized_features_list: int32 list of Rank 1 Tensors, each containing the bucketized feature (for each feature column).
//	max_splits: int; the maximum number of splits possible in the whole tree.
//	num_buckets: int; equal to the maximum possible value of the bucketized feature.
//
// Returns output Rank 4 Tensor (shape=[#features, #splits, #buckets, 2]) containing accumulated stats put into the corresponding node and bucket. The first index of 4th dimension refers to gradients, and the second to hessians.
func BoostedTreesMakeStatsSummary(scope *Scope, node_ids tf.Output, gradients tf.Output, hessians tf.Output, bucketized_features_list []tf.Output, max_splits int64, num_buckets int64) (stats_summary tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"max_splits": max_splits, "num_buckets": num_buckets}
	opspec := tf.OpSpec{
		Type: "BoostedTreesMakeStatsSummary",
		Input: []tf.Input{
			node_ids, gradients, hessians, tf.OutputList(bucketized_features_list),
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Deserializes a serialized tree ensemble config and replaces current tree
//
// ensemble.
//
// Arguments:
//	tree_ensemble_handle: Handle to the tree ensemble.
//	stamp_token: Token to use as the new value of the resource stamp.
//	tree_ensemble_serialized: Serialized proto of the ensemble.
//
// Returns the created operation.
func BoostedTreesDeserializeEnsemble(scope *Scope, tree_ensemble_handle tf.Output, stamp_token tf.Output, tree_ensemble_serialized tf.Output) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "BoostedTreesDeserializeEnsemble",
		Input: []tf.Input{
			tree_ensemble_handle, stamp_token, tree_ensemble_serialized,
		},
	}
	return scope.AddOperation(opspec)
}

// Flush the quantile summaries from each quantile stream resource.
//
// An op that outputs a list of quantile summaries of a quantile stream resource.
// Each summary Tensor is rank 2, containing summaries (value, weight, min_rank,
// max_rank) for a single feature.
//
// Arguments:
//	quantile_stream_resource_handle: resource handle referring to a QuantileStreamResource.
//
func BoostedTreesFlushQuantileSummaries(scope *Scope, quantile_stream_resource_handle tf.Output, num_features int64) (summaries []tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_features": num_features}
	opspec := tf.OpSpec{
		Type: "BoostedTreesFlushQuantileSummaries",
		Input: []tf.Input{
			quantile_stream_resource_handle,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	if scope.Err() != nil {
		return
	}
	var idx int
	var err error
	if summaries, idx, err = makeOutputList(op, idx, "summaries"); err != nil {
		scope.UpdateErr("BoostedTreesFlushQuantileSummaries", err)
		return
	}
	return summaries
}

// BoostedTreesSparseCalculateBestFeatureSplitAttr is an optional argument to BoostedTreesSparseCalculateBestFeatureSplit.
type BoostedTreesSparseCalculateBestFeatureSplitAttr func(optionalAttr)

// BoostedTreesSparseCalculateBestFeatureSplitSplitType sets the optional split_type attribute to value.
//
// value: A string indicating if this Op should perform inequality split or equality split.
// If not specified, defaults to "inequality"
func BoostedTreesSparseCalculateBestFeatureSplitSplitType(value string) BoostedTreesSparseCalculateBestFeatureSplitAttr {
	return func(m optionalAttr) {
		m["split_type"] = value
	}
}

// Calculates gains for each feature and returns the best possible split information for the feature.
//
// The split information is the best threshold (bucket id), gains and left/right node contributions per node for each feature.
//
// It is possible that not all nodes can be split on each feature. Hence, the list of possible nodes can differ between the features. Therefore, we return `node_ids_list` for each feature, containing the list of nodes that this feature can be used to split.
//
// In this manner, the output is the best split per feature and per node, so that it needs to be combined later to produce the best split for each node (among all possible features).
//
// The output shapes are compatible in a way that the first dimension of all tensors is the same and equal to the number of possible split nodes for each feature.
//
// Arguments:
//	node_id_range: A Rank 1 tensor (shape=[2]) to specify the range [first, last) of node ids to process within `stats_summary_list`. The nodes are iterated between the two nodes specified by the tensor, as like `for node_id in range(node_id_range[0], node_id_range[1])` (Note that the last index node_id_range[1] is exclusive).
//	stats_summary_indices: A Rank 2 int64 tensor of dense shape [N, 4] (N specifies the number of non-zero values) for accumulated stats summary (gradient/hessian) per node per bucket for each feature. The second dimension contains node id, feature dimension, bucket id, and stats dim.
// stats dim is the sum of logits dimension and hessian dimension; hessian dimension can either be logits dimension if diagonal hessian is used, or logits dimension^2 if full hessian is used.
//	stats_summary_values: A Rank 1 float tensor of dense shape [N] (N specifies the number of non-zero values), which supplies the values for each element in summary_indices.
//	stats_summary_shape: A Rank 1 float tensor of dense shape [4], which specifies the dense shape of the sparse tensor, which is [num tree nodes, feature dimensions, num buckets, stats dim].
//	l1: l1 regularization factor on leaf weights, per instance based.
//	l2: l2 regularization factor on leaf weights, per instance based.
//	tree_complexity: adjustment to the gain, per leaf based.
//	min_node_weight: minimum average of hessians in a node required before the node is considered for splitting.
//	logits_dimension: The dimension of logit, i.e., number of classes.
//
// Returns:
//	node_ids: A Rank 1 tensor indicating possible node ids that can be split.
//	gains: A Rank 1 tensor indicating the best gains to split each node.
//	feature_dimensions: A Rank 1 tensor indicating the best feature dimension for each feature to split for each node.
//	thresholds: A Rank 1 tensor indicating the bucket id to compare with (as a threshold) for split in each node.
//	left_node_contribs: A Rank 2 tensor indicating the contribution of the left nodes when branching from parent nodes to the left direction by the given threshold for each feature.
// This value will be used to make the left node value by adding to the parent node value. Second dimension size is logits dimension.
//	right_node_contribs: A Rank 2 tensor, with the same shape/conditions as left_node_contribs_list, but just that the value is for the right node.
//	split_with_default_directions: A Rank 1 tensor indicating which direction to go if data is missing.
// Inequality with default left returns 0, inequality with default right returns 1, equality with default right returns 2.
func BoostedTreesSparseCalculateBestFeatureSplit(scope *Scope, node_id_range tf.Output, stats_summary_indices tf.Output, stats_summary_values tf.Output, stats_summary_shape tf.Output, l1 tf.Output, l2 tf.Output, tree_complexity tf.Output, min_node_weight tf.Output, logits_dimension int64, optional ...BoostedTreesSparseCalculateBestFeatureSplitAttr) (node_ids tf.Output, gains tf.Output, feature_dimensions tf.Output, thresholds tf.Output, left_node_contribs tf.Output, right_node_contribs tf.Output, split_with_default_directions tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"logits_dimension": logits_dimension}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "BoostedTreesSparseCalculateBestFeatureSplit",
		Input: []tf.Input{
			node_id_range, stats_summary_indices, stats_summary_values, stats_summary_shape, l1, l2, tree_complexity, min_node_weight,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2), op.Output(3), op.Output(4), op.Output(5), op.Output(6)
}

// Calculates gains for each feature and returns the best possible split information for each node. However, if no split is found, then no split information is returned for that node.
//
// The split information is the best threshold (bucket id), gains and left/right node contributions per node for each feature.
//
// It is possible that not all nodes can be split on each feature. Hence, the list of possible nodes can differ between the features. Therefore, we return `node_ids_list` for each feature, containing the list of nodes that this feature can be used to split.
//
// In this manner, the output is the best split per feature and per node, so that it needs to be combined later to produce the best split for each node (among all possible features).
//
// The output shapes are compatible in a way that the first dimension of all tensors is the same and equal to the number of possible split nodes for each feature.
//
// Arguments:
//	node_id_range: A Rank 1 tensor (shape=[2]) to specify the range [first, last) of node ids to process within `stats_summary_list`. The nodes are iterated between the two nodes specified by the tensor, as like `for node_id in range(node_id_range[0], node_id_range[1])` (Note that the last index node_id_range[1] is exclusive).
//	stats_summaries_list: A list of Rank 4 tensors (#shape=[max_splits, feature_dims, bucket, stats_dims]) for accumulated stats summary (gradient/hessian) per node, per dimension, per bucket for each feature.
// The first dimension of the tensor is the maximum number of splits, and thus not all elements of it will be used, but only the indexes specified by node_ids will be used.
//	split_types: A Rank 1 tensor indicating if this Op should perform inequality split or equality split per feature.
//	candidate_feature_ids: Rank 1 tensor with ids for each feature. This is the real id of the feature.
//	l1: l1 regularization factor on leaf weights, per instance based.
//	l2: l2 regularization factor on leaf weights, per instance based.
//	tree_complexity: adjustment to the gain, per leaf based.
//	min_node_weight: minimum average of hessians in a node required before the node is considered for splitting.
//	logits_dimension: The dimension of logit, i.e., number of classes.
//
// Returns:
//	node_ids: Rank 1 tensors indicating possible split node ids for each feature. The length of the list is num_features, but each tensor has different size as each feature provides different possible nodes. See above for details like shapes and sizes.
//	gains: A Rank 1 tensor indicating the best gains for each feature to split for certain nodes. See above for details like shapes and sizes.
//	feature_ids: Rank 1 tensors indicating the best feature id for each node. See above for details like shapes and sizes.
//	feature_dimensions: Rank 1 tensors indicating the best feature dimension for each feature to split for certain nodes if the feature is multi-dimensional. See above for details like shapes and sizes.
//	thresholds: Rank 1 tensors indicating the bucket id to compare with (as a threshold) for split in each node. See above for details like shapes and sizes.
//	left_node_contribs: Rank 2 tensors indicating the contribution of the left nodes when branching from parent nodes (given by the tensor element in the output node_ids_list) to the left direction by the given threshold for each feature. This value will be used to make the left node value by adding to the parent node value. Second dimension size is 1 for 1-dimensional logits, but would be larger for multi-class problems. See above for details like shapes and sizes.
//	right_node_contribs: Rank 2 tensors, with the same shape/conditions as left_node_contribs_list, but just that the value is for the right node.
//	split_with_default_directions: Rank 1 tensors indicating which direction to go if data is missing. See above for details like shapes and sizes.
// Inequality with default left returns 0, inequality with default right returns 1, equality with default right returns 2.
func BoostedTreesCalculateBestFeatureSplitV2(scope *Scope, node_id_range tf.Output, stats_summaries_list []tf.Output, split_types tf.Output, candidate_feature_ids tf.Output, l1 tf.Output, l2 tf.Output, tree_complexity tf.Output, min_node_weight tf.Output, logits_dimension int64) (node_ids tf.Output, gains tf.Output, feature_ids tf.Output, feature_dimensions tf.Output, thresholds tf.Output, left_node_contribs tf.Output, right_node_contribs tf.Output, split_with_default_directions tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"logits_dimension": logits_dimension}
	opspec := tf.OpSpec{
		Type: "BoostedTreesCalculateBestFeatureSplitV2",
		Input: []tf.Input{
			node_id_range, tf.OutputList(stats_summaries_list), split_types, candidate_feature_ids, l1, l2, tree_complexity, min_node_weight,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2), op.Output(3), op.Output(4), op.Output(5), op.Output(6), op.Output(7)
}

// Calculates gains for each feature and returns the best possible split information for the feature.
//
// The split information is the best threshold (bucket id), gains and left/right node contributions per node for each feature.
//
// It is possible that not all nodes can be split on each feature. Hence, the list of possible nodes can differ between the features. Therefore, we return `node_ids_list` for each feature, containing the list of nodes that this feature can be used to split.
//
// In this manner, the output is the best split per feature and per node, so that it needs to be combined later to produce the best split for each node (among all possible features).
//
// The output lists are all of the same length, `num_features`.
// The output shapes are compatible in a way that the first dimension of all tensors of all lists is the same and equal to the number of possible split nodes for each feature.
//
// Arguments:
//	node_id_range: A Rank 1 tensor (shape=[2]) to specify the range [first, last) of node ids to process within `stats_summary_list`. The nodes are iterated between the two nodes specified by the tensor, as like `for node_id in range(node_id_range[0], node_id_range[1])` (Note that the last index node_id_range[1] is exclusive).
//	stats_summary_list: A list of Rank 3 tensors (#shape=[max_splits, bucket, 2]) for accumulated stats summary (gradient/hessian) per node per bucket for each feature. The first dimension of the tensor is the maximum number of splits, and thus not all elements of it will be used, but only the indexes specified by node_ids will be used.
//	l1: l1 regularization factor on leaf weights, per instance based.
//	l2: l2 regularization factor on leaf weights, per instance based.
//	tree_complexity: adjustment to the gain, per leaf based.
//	min_node_weight: minimum average of hessians in a node required before the node is considered for splitting.
//	max_splits: the number of nodes that can be split in the whole tree. Used as a dimension of output tensors.
//
// Returns:
//	node_ids_list: An output list of Rank 1 tensors indicating possible split node ids for each feature. The length of the list is num_features, but each tensor has different size as each feature provides different possible nodes. See above for details like shapes and sizes.
//	gains_list: An output list of Rank 1 tensors indicating the best gains for each feature to split for certain nodes. See above for details like shapes and sizes.
//	thresholds_list: An output list of Rank 1 tensors indicating the bucket id to compare with (as a threshold) for split in each node. See above for details like shapes and sizes.
//	left_node_contribs_list: A list of Rank 2 tensors indicating the contribution of the left nodes when branching from parent nodes (given by the tensor element in the output node_ids_list) to the left direction by the given threshold for each feature. This value will be used to make the left node value by adding to the parent node value. Second dimension size is 1 for 1-dimensional logits, but would be larger for multi-class problems. See above for details like shapes and sizes.
//	right_node_contribs_list: A list of Rank 2 tensors, with the same shape/conditions as left_node_contribs_list, but just that the value is for the right node.
func BoostedTreesCalculateBestGainsPerFeature(scope *Scope, node_id_range tf.Output, stats_summary_list []tf.Output, l1 tf.Output, l2 tf.Output, tree_complexity tf.Output, min_node_weight tf.Output, max_splits int64) (node_ids_list []tf.Output, gains_list []tf.Output, thresholds_list []tf.Output, left_node_contribs_list []tf.Output, right_node_contribs_list []tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"max_splits": max_splits}
	opspec := tf.OpSpec{
		Type: "BoostedTreesCalculateBestGainsPerFeature",
		Input: []tf.Input{
			node_id_range, tf.OutputList(stats_summary_list), l1, l2, tree_complexity, min_node_weight,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	if scope.Err() != nil {
		return
	}
	var idx int
	var err error
	if node_ids_list, idx, err = makeOutputList(op, idx, "node_ids_list"); err != nil {
		scope.UpdateErr("BoostedTreesCalculateBestGainsPerFeature", err)
		return
	}
	if gains_list, idx, err = makeOutputList(op, idx, "gains_list"); err != nil {
		scope.UpdateErr("BoostedTreesCalculateBestGainsPerFeature", err)
		return
	}
	if thresholds_list, idx, err = makeOutputList(op, idx, "thresholds_list"); err != nil {
		scope.UpdateErr("BoostedTreesCalculateBestGainsPerFeature", err)
		return
	}
	if left_node_contribs_list, idx, err = makeOutputList(op, idx, "left_node_contribs_list"); err != nil {
		scope.UpdateErr("BoostedTreesCalculateBestGainsPerFeature", err)
		return
	}
	if right_node_contribs_list, idx, err = makeOutputList(op, idx, "right_node_contribs_list"); err != nil {
		scope.UpdateErr("BoostedTreesCalculateBestGainsPerFeature", err)
		return
	}
	return node_ids_list, gains_list, thresholds_list, left_node_contribs_list, right_node_contribs_list
}

// Checks whether a tree ensemble has been initialized.
//
// Arguments:
//	tree_ensemble_handle: Handle to the tree ensemble resource.
//
// Returns output boolean on whether it is initialized or not.
func IsBoostedTreesEnsembleInitialized(scope *Scope, tree_ensemble_handle tf.Output) (is_initialized tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "IsBoostedTreesEnsembleInitialized",
		Input: []tf.Input{
			tree_ensemble_handle,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// BoostedTreesEnsembleResourceHandleOpAttr is an optional argument to BoostedTreesEnsembleResourceHandleOp.
type BoostedTreesEnsembleResourceHandleOpAttr func(optionalAttr)

// BoostedTreesEnsembleResourceHandleOpContainer sets the optional container attribute to value.
// If not specified, defaults to ""
func BoostedTreesEnsembleResourceHandleOpContainer(value string) BoostedTreesEnsembleResourceHandleOpAttr {
	return func(m optionalAttr) {
		m["container"] = value
	}
}

// BoostedTreesEnsembleResourceHandleOpSharedName sets the optional shared_name attribute to value.
// If not specified, defaults to ""
func BoostedTreesEnsembleResourceHandleOpSharedName(value string) BoostedTreesEnsembleResourceHandleOpAttr {
	return func(m optionalAttr) {
		m["shared_name"] = value
	}
}

// Creates a handle to a BoostedTreesEnsembleResource
func BoostedTreesEnsembleResourceHandleOp(scope *Scope, optional ...BoostedTreesEnsembleResourceHandleOpAttr) (resource tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "BoostedTreesEnsembleResourceHandleOp",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
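
// A minimal sketch (editorial; the shared name is an assumption) of creating a
// shared ensemble handle and checking its initialization state:
//
//	s := NewScope()
//	h := BoostedTreesEnsembleResourceHandleOp(s,
//		BoostedTreesEnsembleResourceHandleOpSharedName("my_ensemble"))
//	ok := IsBoostedTreesEnsembleInitialized(s, h)
//	_ = ok // boolean scalar; false until the ensemble resource is created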

// Deserializes a proto into the tree handle
//
// Arguments:
//	tree_handle: Handle to the tree resource to be restored.
//	tree_config: Serialized proto string of the boosted_trees.Tree proto.
//
// Returns the created operation.
func TensorForestTreeDeserialize(scope *Scope, tree_handle tf.Output, tree_config tf.Output) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "TensorForestTreeDeserialize",
		Input: []tf.Input{
			tree_handle, tree_config,
		},
	}
	return scope.AddOperation(opspec)
}

// Serializes the tree handle to a proto
//
// Arguments:
//	tree_handle: Handle to the tree resource to be serialized.
//
// Returns Serialized proto string of the tree resource.
func TensorForestTreeSerialize(scope *Scope, tree_handle tf.Output) (tree_config tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "TensorForestTreeSerialize",
		Input: []tf.Input{
			tree_handle,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
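
// A minimal round-trip sketch (editorial; the handle wiring is an assumption):
// serialize one tree resource and restore another from the proto string:
//
//	s := NewScope()
//	src := TensorForestTreeResourceHandleOp(s)
//	dst := TensorForestTreeResourceHandleOp(s,
//		TensorForestTreeResourceHandleOpSharedName("copy"))
//	cfg := TensorForestTreeSerialize(s, src)
//	restore := TensorForestTreeDeserialize(s, dst, cfg)
//	_ = restore // run to copy the tree into dst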

// Creates a tree resource and returns a handle to it.
//
// Arguments:
//	tree_handle: Handle to the tree resource to be created.
//	tree_config: Serialized proto string of the boosted_trees.Tree.
//
// Returns the created operation.
func TensorForestCreateTreeVariable(scope *Scope, tree_handle tf.Output, tree_config tf.Output) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "TensorForestCreateTreeVariable",
		Input: []tf.Input{
			tree_handle, tree_config,
		},
	}
	return scope.AddOperation(opspec)
}

// Checks whether a tree has been initialized.
//
// Arguments:
//	tree_handle: Handle to the tree.
//
// Returns Whether the tree is initialized.
func TensorForestTreeIsInitializedOp(scope *Scope, tree_handle tf.Output) (is_initialized tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "TensorForestTreeIsInitializedOp",
		Input: []tf.Input{
			tree_handle,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// TensorForestTreeResourceHandleOpAttr is an optional argument to TensorForestTreeResourceHandleOp.
type TensorForestTreeResourceHandleOpAttr func(optionalAttr)

// TensorForestTreeResourceHandleOpContainer sets the optional container attribute to value.
// If not specified, defaults to ""
func TensorForestTreeResourceHandleOpContainer(value string) TensorForestTreeResourceHandleOpAttr {
	return func(m optionalAttr) {
		m["container"] = value
	}
}

// TensorForestTreeResourceHandleOpSharedName sets the optional shared_name attribute to value.
// If not specified, defaults to ""
func TensorForestTreeResourceHandleOpSharedName(value string) TensorForestTreeResourceHandleOpAttr {
	return func(m optionalAttr) {
		m["shared_name"] = value
	}
}

// Creates a handle to a TensorForestTreeResource
func TensorForestTreeResourceHandleOp(scope *Scope, optional ...TensorForestTreeResourceHandleOpAttr) (resource tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "TensorForestTreeResourceHandleOp",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// AllCandidateSamplerAttr is an optional argument to AllCandidateSampler.
type AllCandidateSamplerAttr func(optionalAttr)

// AllCandidateSamplerSeed sets the optional seed attribute to value.
//
// value: If either seed or seed2 are set to be non-zero, the random number
// generator is seeded by the given seed.  Otherwise, it is seeded by a
// random seed.
// If not specified, defaults to 0
func AllCandidateSamplerSeed(value int64) AllCandidateSamplerAttr {
	return func(m optionalAttr) {
		m["seed"] = value
	}
}

// AllCandidateSamplerSeed2 sets the optional seed2 attribute to value.
//
// value: A second seed to avoid seed collision.
// If not specified, defaults to 0
func AllCandidateSamplerSeed2(value int64) AllCandidateSamplerAttr {
	return func(m optionalAttr) {
		m["seed2"] = value
	}
}

// Generates labels for candidate sampling with a learned unigram distribution.
//
// See explanations of candidate sampling and the data formats at
// go/candidate-sampling.
//
// For each batch, this op picks a single set of sampled candidate labels.
//
// The advantages of sampling candidates per-batch are simplicity and the
// possibility of efficient dense matrix multiplication. The disadvantage is that
// the sampled candidates must be chosen independently of the context and of the
// true labels.
//
// Arguments:
//	true_classes: A batch_size * num_true matrix, in which each row contains the
// IDs of the num_true target_classes in the corresponding original label.
//	num_true: Number of true labels per context.
//	num_sampled: Number of candidates to produce.
//	unique: If unique is true, we sample with rejection, so that all sampled
// candidates in a batch are unique. This requires some approximation to
// estimate the post-rejection sampling probabilities.
//
// Returns:
//	sampled_candidates: A vector of length num_sampled, in which each element is
// the ID of a sampled candidate.
//	true_expected_count: A batch_size * num_true matrix, representing
// the number of times each candidate is expected to occur in a batch
// of sampled candidates. If unique=true, then this is a probability.
//	sampled_expected_count: A vector of length num_sampled, for each sampled
// candidate representing the number of times the candidate is expected
// to occur in a batch of sampled candidates.  If unique=true, then this is a
// probability.
func AllCandidateSampler(scope *Scope, true_classes tf.Output, num_true int64, num_sampled int64, unique bool, optional ...AllCandidateSamplerAttr) (sampled_candidates tf.Output, true_expected_count tf.Output, sampled_expected_count tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_true": num_true, "num_sampled": num_sampled, "unique": unique}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "AllCandidateSampler",
		Input: []tf.Input{
			true_classes,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}
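
// A minimal sketch (editorial; batch contents are assumptions) with one true
// label per example and five candidates:
//
//	s := NewScope()
//	trueClasses := Const(s, [][]int64{{3}, {7}}) // batch_size x num_true
//	sampled, trueExp, sampledExp := AllCandidateSampler(s, trueClasses, 1, 5, true)
//	_, _, _ = sampled, trueExp, sampledExp // sampled has length num_sampled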

// FixedUnigramCandidateSamplerAttr is an optional argument to FixedUnigramCandidateSampler.
type FixedUnigramCandidateSamplerAttr func(optionalAttr)

// FixedUnigramCandidateSamplerVocabFile sets the optional vocab_file attribute to value.
//
// value: Each valid line in this file (which should have a CSV-like format)
// corresponds to a valid word ID. IDs are in sequential order, starting from
// num_reserved_ids. The last entry in each line is expected to be a value
// corresponding to the count or relative probability. Exactly one of vocab_file
// and unigrams needs to be passed to this op.
// If not specified, defaults to ""
func FixedUnigramCandidateSamplerVocabFile(value string) FixedUnigramCandidateSamplerAttr {
	return func(m optionalAttr) {
		m["vocab_file"] = value
	}
}

// FixedUnigramCandidateSamplerDistortion sets the optional distortion attribute to value.
//
// value: The distortion is used to skew the unigram probability distribution.
// Each weight is first raised to the distortion's power before adding to the
// internal unigram distribution. As a result, distortion = 1.0 gives regular
// unigram sampling (as defined by the vocab file), and distortion = 0.0 gives
// a uniform distribution.
// If not specified, defaults to 1
func FixedUnigramCandidateSamplerDistortion(value float32) FixedUnigramCandidateSamplerAttr {
	return func(m optionalAttr) {
		m["distortion"] = value
	}
}

// FixedUnigramCandidateSamplerNumReservedIds sets the optional num_reserved_ids attribute to value.
//
// value: Optionally some reserved IDs can be added in the range [0,
// ..., num_reserved_ids) by the users. One use case is that a special unknown
// word token is used as ID 0. These IDs will have a sampling probability of 0.
// If not specified, defaults to 0
func FixedUnigramCandidateSamplerNumReservedIds(value int64) FixedUnigramCandidateSamplerAttr {
	return func(m optionalAttr) {
		m["num_reserved_ids"] = value
	}
}

// FixedUnigramCandidateSamplerNumShards sets the optional num_shards attribute to value.
//
// value: A sampler can be used to sample from a subset of the original range
// in order to speed up the whole computation through parallelism. This parameter
// (together with 'shard') indicates the number of partitions that are being
// used in the overall computation.
// If not specified, defaults to 1
//
// REQUIRES: value >= 1
func FixedUnigramCandidateSamplerNumShards(value int64) FixedUnigramCandidateSamplerAttr {
	return func(m optionalAttr) {
		m["num_shards"] = value
	}
}

// FixedUnigramCandidateSamplerShard sets the optional shard attribute to value.
//
// value: A sampler can be used to sample from a subset of the original range
// in order to speed up the whole computation through parallelism. This parameter
// (together with 'num_shards') indicates the particular partition number of a
// sampler op, when partitioning is being used.
// If not specified, defaults to 0
//
// REQUIRES: value >= 0
func FixedUnigramCandidateSamplerShard(value int64) FixedUnigramCandidateSamplerAttr {
	return func(m optionalAttr) {
		m["shard"] = value
	}
}

// FixedUnigramCandidateSamplerUnigrams sets the optional unigrams attribute to value.
//
// value: A list of unigram counts or probabilities, one per ID in sequential
// order. Exactly one of vocab_file and unigrams should be passed to this op.
// If not specified, defaults to <>
func FixedUnigramCandidateSamplerUnigrams(value []float32) FixedUnigramCandidateSamplerAttr {
	return func(m optionalAttr) {
		m["unigrams"] = value
	}
}

// FixedUnigramCandidateSamplerSeed sets the optional seed attribute to value.
//
// value: If either seed or seed2 are set to be non-zero, the random number
// generator is seeded by the given seed.  Otherwise, it is seeded by a
// random seed.
// If not specified, defaults to 0
func FixedUnigramCandidateSamplerSeed(value int64) FixedUnigramCandidateSamplerAttr {
	return func(m optionalAttr) {
		m["seed"] = value
	}
}

// FixedUnigramCandidateSamplerSeed2 sets the optional seed2 attribute to value.
//
// value: A second seed to avoid seed collision.
// If not specified, defaults to 0
func FixedUnigramCandidateSamplerSeed2(value int64) FixedUnigramCandidateSamplerAttr {
	return func(m optionalAttr) {
		m["seed2"] = value
	}
}

// Generates labels for candidate sampling with a learned unigram distribution.
//
// A unigram sampler could use a fixed unigram distribution read from a
// file or passed in as an in-memory array instead of building up the distribution
// from data on the fly. There is also an option to skew the distribution by
// applying a distortion power to the weights.
//
// The vocabulary file should be in CSV-like format, with the last field
// being the weight associated with the word.
//
// For each batch, this op picks a single set of sampled candidate labels.
//
// The advantages of sampling candidates per-batch are simplicity and the
// possibility of efficient dense matrix multiplication. The disadvantage is that
// the sampled candidates must be chosen independently of the context and of the
// true labels.
//
// Arguments:
//	true_classes: A batch_size * num_true matrix, in which each row contains the
// IDs of the num_true target_classes in the corresponding original label.
//	num_true: Number of true labels per context.
//	num_sampled: Number of candidates to randomly sample.
//	unique: If unique is true, we sample with rejection, so that all sampled
// candidates in a batch are unique. This requires some approximation to
// estimate the post-rejection sampling probabilities.
//	range_max: The sampler will sample integers from the interval [0, range_max).
//
// Returns:
//	sampled_candidates: A vector of length num_sampled, in which each element is
// the ID of a sampled candidate.
//	true_expected_count: A batch_size * num_true matrix, representing
// the number of times each candidate is expected to occur in a batch
// of sampled candidates. If unique=true, then this is a probability.
//	sampled_expected_count: A vector of length num_sampled, for each sampled
// candidate representing the number of times the candidate is expected
// to occur in a batch of sampled candidates.  If unique=true, then this is a
// probability.
func FixedUnigramCandidateSampler(scope *Scope, true_classes tf.Output, num_true int64, num_sampled int64, unique bool, range_max int64, optional ...FixedUnigramCandidateSamplerAttr) (sampled_candidates tf.Output, true_expected_count tf.Output, sampled_expected_count tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_true": num_true, "num_sampled": num_sampled, "unique": unique, "range_max": range_max}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "FixedUnigramCandidateSampler",
		Input: []tf.Input{
			true_classes,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}
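
// A minimal sketch (editorial; the weights are assumptions) of sampling from an
// in-memory unigram distribution over a vocabulary of four IDs:
//
//	s := NewScope()
//	trueClasses := Const(s, [][]int64{{0}, {2}})
//	sampled, _, _ := FixedUnigramCandidateSampler(s, trueClasses, 1, 2, true, 4,
//		FixedUnigramCandidateSamplerUnigrams([]float32{10, 5, 1, 1}),
//		FixedUnigramCandidateSamplerDistortion(0.75))
//	_ = sampled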

// ThreadUnsafeUnigramCandidateSamplerAttr is an optional argument to ThreadUnsafeUnigramCandidateSampler.
type ThreadUnsafeUnigramCandidateSamplerAttr func(optionalAttr)

// ThreadUnsafeUnigramCandidateSamplerSeed sets the optional seed attribute to value.
//
// value: If either seed or seed2 are set to be non-zero, the random number
// generator is seeded by the given seed.  Otherwise, it is seeded by a
// random seed.
// If not specified, defaults to 0
func ThreadUnsafeUnigramCandidateSamplerSeed(value int64) ThreadUnsafeUnigramCandidateSamplerAttr {
	return func(m optionalAttr) {
		m["seed"] = value
	}
}

// ThreadUnsafeUnigramCandidateSamplerSeed2 sets the optional seed2 attribute to value.
//
// value: A second seed to avoid seed collision.
// If not specified, defaults to 0
func ThreadUnsafeUnigramCandidateSamplerSeed2(value int64) ThreadUnsafeUnigramCandidateSamplerAttr {
	return func(m optionalAttr) {
		m["seed2"] = value
	}
}

// Generates labels for candidate sampling with a learned unigram distribution.
//
// See explanations of candidate sampling and the data formats at
// go/candidate-sampling.
//
// For each batch, this op picks a single set of sampled candidate labels.
//
// The advantages of sampling candidates per-batch are simplicity and the
// possibility of efficient dense matrix multiplication. The disadvantage is that
// the sampled candidates must be chosen independently of the context and of the
// true labels.
//
// Arguments:
//	true_classes: A batch_size * num_true matrix, in which each row contains the
// IDs of the num_true target_classes in the corresponding original label.
//	num_true: Number of true labels per context.
//	num_sampled: Number of candidates to randomly sample.
//	unique: If unique is true, we sample with rejection, so that all sampled
// candidates in a batch are unique. This requires some approximation to
// estimate the post-rejection sampling probabilities.
//	range_max: The sampler will sample integers from the interval [0, range_max).
//
// Returns:
//	sampled_candidates: A vector of length num_sampled, in which each element is
// the ID of a sampled candidate.
//	true_expected_count: A batch_size * num_true matrix, representing
// the number of times each candidate is expected to occur in a batch
// of sampled candidates. If unique=true, then this is a probability.
//	sampled_expected_count: A vector of length num_sampled, for each sampled
// candidate representing the number of times the candidate is expected
// to occur in a batch of sampled candidates.  If unique=true, then this is a
// probability.
func ThreadUnsafeUnigramCandidateSampler(scope *Scope, true_classes tf.Output, num_true int64, num_sampled int64, unique bool, range_max int64, optional ...ThreadUnsafeUnigramCandidateSamplerAttr) (sampled_candidates tf.Output, true_expected_count tf.Output, sampled_expected_count tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_true": num_true, "num_sampled": num_sampled, "unique": unique, "range_max": range_max}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ThreadUnsafeUnigramCandidateSampler",
		Input: []tf.Input{
			true_classes,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}

// MatrixDiagPartV3Attr is an optional argument to MatrixDiagPartV3.
type MatrixDiagPartV3Attr func(optionalAttr)

// MatrixDiagPartV3Align sets the optional align attribute to value.
//
// value: Some diagonals are shorter than `max_diag_len` and need to be padded. `align` is
// a string specifying how superdiagonals and subdiagonals should be aligned,
// respectively. There are four possible alignments: "RIGHT_LEFT" (default),
// "LEFT_RIGHT", "LEFT_LEFT", and "RIGHT_RIGHT". "RIGHT_LEFT" aligns superdiagonals
// to the right (left-pads the row) and subdiagonals to the left (right-pads the
// row). It is the packing format LAPACK uses. cuSPARSE uses "LEFT_RIGHT", which is
// the opposite alignment.
// If not specified, defaults to "RIGHT_LEFT"
func MatrixDiagPartV3Align(value string) MatrixDiagPartV3Attr {
	return func(m optionalAttr) {
		m["align"] = value
	}
}

// Returns the batched diagonal part of a batched tensor.
//
// Returns a tensor with the `k[0]`-th to `k[1]`-th diagonals of the batched
// `input`.
//
// Assume `input` has `r` dimensions `[I, J, ..., L, M, N]`.
// Let `max_diag_len` be the maximum length among all diagonals to be extracted,
// `max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))`
// Let `num_diags` be the number of diagonals to extract,
// `num_diags = k[1] - k[0] + 1`.
//
// If `num_diags == 1`, the output tensor is of rank `r - 1` with shape
// `[I, J, ..., L, max_diag_len]` and values:
//
// ```
// diagonal[i, j, ..., l, n]
//   = input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N,
//     padding_value                 ; otherwise.
// ```
// where `y = max(-k[1], 0)`, `x = max(k[1], 0)`.
//
// Otherwise, the output tensor has rank `r` with dimensions
// `[I, J, ..., L, num_diags, max_diag_len]` with values:
//
// ```
// diagonal[i, j, ..., l, m, n]
//   = input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N,
//     padding_value                 ; otherwise.
// ```
// where `d = k[1] - m`, `y = max(-d, 0) - offset`, and `x = max(d, 0) - offset`.
//
// `offset` is zero except when the alignment of the diagonal is to the right.
// ```
// offset = max_diag_len - diag_len(d) ; if (`align` in {RIGHT_LEFT, RIGHT_RIGHT}
//                                            and `d >= 0`) or
//                                          (`align` in {LEFT_RIGHT, RIGHT_RIGHT}
//                                            and `d <= 0`)
//          0                          ; otherwise
// ```
// where `diag_len(d) = min(cols - max(d, 0), rows + min(d, 0))`.
//
// The input must be at least a matrix.
//
// For example:
//
// ```
// input = np.array([[[1, 2, 3, 4],  # Input shape: (2, 3, 4)
//                    [5, 6, 7, 8],
//                    [9, 8, 7, 6]],
//                   [[5, 4, 3, 2],
//                    [1, 2, 3, 4],
//                    [5, 6, 7, 8]]])
//
// # A main diagonal from each batch.
// tf.matrix_diag_part(input) ==> [[1, 6, 7],  # Output shape: (2, 3)
//                                 [5, 2, 7]]
//
// # A superdiagonal from each batch.
// tf.matrix_diag_part(input, k = 1)
//   ==> [[2, 7, 6],  # Output shape: (2, 3)
//        [4, 3, 8]]
//
// # A band from each batch.
// tf.matrix_diag_part(input, k = (-1, 2))
//   ==> [[[0, 3, 8],  # Output shape: (2, 4, 3)
//         [2, 7, 6],
//         [1, 6, 7],
//         [5, 8, 0]],
//        [[0, 3, 4],
//         [4, 3, 8],
//         [5, 2, 7],
//         [1, 6, 0]]]
//
// # LEFT_RIGHT alignment.
// tf.matrix_diag_part(input, k = (-1, 2), align="LEFT_RIGHT")
//   ==> [[[3, 8, 0],  # Output shape: (2, 4, 3)
//         [2, 7, 6],
//         [1, 6, 7],
//         [0, 5, 8]],
//        [[3, 4, 0],
//         [4, 3, 8],
//         [5, 2, 7],
//         [0, 1, 6]]]
//
// # max_diag_len can be shorter than the main diagonal.
// tf.matrix_diag_part(input, k = (-2, -1))
//   ==> [[[5, 8],
//         [9, 0]],
//        [[1, 6],
//         [5, 0]]]
//
// # padding_value = 9
// tf.matrix_diag_part(input, k = (1, 3), padding_value = 9)
//   ==> [[[9, 9, 4],  # Output shape: (2, 3, 3)
//         [9, 3, 8],
//         [2, 7, 6]],
//        [[9, 9, 2],
//         [9, 3, 4],
//         [4, 3, 8]]]
//
// ```
//
// Arguments:
//	input: Rank `r` tensor where `r >= 2`.
//	k: Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main
// diagonal, and negative value means subdiagonals. `k` can be a single integer
// (for a single diagonal) or a pair of integers specifying the low and high ends
// of a matrix band. `k[0]` must not be larger than `k[1]`.
//	padding_value: The value to fill the area outside the specified diagonal band with.
// Default is 0.
//
// Returns The extracted diagonal(s).
func MatrixDiagPartV3(scope *Scope, input tf.Output, k tf.Output, padding_value tf.Output, optional ...MatrixDiagPartV3Attr) (diagonal tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "MatrixDiagPartV3",
		Input: []tf.Input{
			input, k, padding_value,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
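
// A minimal sketch (editorial) mirroring the band example above on a single
// 3x4 matrix:
//
//	s := NewScope()
//	input := Const(s, [][]int32{{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 8, 7, 6}})
//	k := Const(s, []int32{-1, 2}) // subdiagonal through second superdiagonal
//	pad := Const(s, int32(0))     // padding_value
//	band := MatrixDiagPartV3(s, input, k, pad, MatrixDiagPartV3Align("LEFT_RIGHT"))
//	_ = band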
4346
4347// LearnedUnigramCandidateSamplerAttr is an optional argument to LearnedUnigramCandidateSampler.
4348type LearnedUnigramCandidateSamplerAttr func(optionalAttr)
4349
4350// LearnedUnigramCandidateSamplerSeed sets the optional seed attribute to value.
4351//
4352// value: If either seed or seed2 are set to be non-zero, the random number
4353// generator is seeded by the given seed.  Otherwise, it is seeded by a
4354// random seed.
4355// If not specified, defaults to 0
4356func LearnedUnigramCandidateSamplerSeed(value int64) LearnedUnigramCandidateSamplerAttr {
4357	return func(m optionalAttr) {
4358		m["seed"] = value
4359	}
4360}
4361
4362// LearnedUnigramCandidateSamplerSeed2 sets the optional seed2 attribute to value.
4363//
4364// value: A second seed to avoid seed collision.
4365// If not specified, defaults to 0
4366func LearnedUnigramCandidateSamplerSeed2(value int64) LearnedUnigramCandidateSamplerAttr {
4367	return func(m optionalAttr) {
4368		m["seed2"] = value
4369	}
4370}
4371
4372// Generates labels for candidate sampling with a learned unigram distribution.
4373//
4374// See explanations of candidate sampling and the data formats at
4375// go/candidate-sampling.
4376//
4377// For each batch, this op picks a single set of sampled candidate labels.
4378//
4379// The advantages of sampling candidates per-batch are simplicity and the
4380// possibility of efficient dense matrix multiplication. The disadvantage is that
4381// the sampled candidates must be chosen independently of the context and of the
4382// true labels.
4383//
4384// Arguments:
4385//	true_classes: A batch_size * num_true matrix, in which each row contains the
4386// IDs of the num_true target_classes in the corresponding original label.
4387//	num_true: Number of true labels per context.
4388//	num_sampled: Number of candidates to randomly sample.
4389//	unique: If unique is true, we sample with rejection, so that all sampled
4390// candidates in a batch are unique. This requires some approximation to
4391// estimate the post-rejection sampling probabilities.
4392//	range_max: The sampler will sample integers from the interval [0, range_max).
4393//
4394// Returns:
4395//	sampled_candidates: A vector of length num_sampled, in which each element is
4396// the ID of a sampled candidate.
4397//	true_expected_count: A batch_size * num_true matrix, representing
4398// the number of times each candidate is expected to occur in a batch
4399// of sampled candidates. If unique=true, then this is a probability.
4400//	sampled_expected_count: A vector of length num_sampled, for each sampled
4401// candidate representing the number of times the candidate is expected
4402// to occur in a batch of sampled candidates.  If unique=true, then this is a
4403// probability.
4404func LearnedUnigramCandidateSampler(scope *Scope, true_classes tf.Output, num_true int64, num_sampled int64, unique bool, range_max int64, optional ...LearnedUnigramCandidateSamplerAttr) (sampled_candidates tf.Output, true_expected_count tf.Output, sampled_expected_count tf.Output) {
4405	if scope.Err() != nil {
4406		return
4407	}
4408	attrs := map[string]interface{}{"num_true": num_true, "num_sampled": num_sampled, "unique": unique, "range_max": range_max}
4409	for _, a := range optional {
4410		a(attrs)
4411	}
4412	opspec := tf.OpSpec{
4413		Type: "LearnedUnigramCandidateSampler",
4414		Input: []tf.Input{
4415			true_classes,
4416		},
4417		Attrs: attrs,
4418	}
4419	op := scope.AddOperation(opspec)
4420	return op.Output(0), op.Output(1), op.Output(2)
4421}
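
// A minimal usage sketch (illustrative; assumes NewScope and Const from this
// package, with shapes following the argument docs above; the same calling
// pattern applies to LogUniformCandidateSampler below):
//
// ```
// s := NewScope()
// trueClasses := Const(s, [][]int64{{3}, {7}}) // batch_size=2, num_true=1
// sampled, trueExpected, sampledExpected := LearnedUnigramCandidateSampler(
// 	s, trueClasses,
// 	1,    // num_true
// 	4,    // num_sampled
// 	true, // unique: sample with rejection
// 	100,  // range_max
// 	LearnedUnigramCandidateSamplerSeed(42))
// ```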
4422
4423// LogUniformCandidateSamplerAttr is an optional argument to LogUniformCandidateSampler.
4424type LogUniformCandidateSamplerAttr func(optionalAttr)
4425
4426// LogUniformCandidateSamplerSeed sets the optional seed attribute to value.
4427//
4428// value: If either seed or seed2 are set to be non-zero, the random number
4429// generator is seeded by the given seed.  Otherwise, it is seeded by a
4430// random seed.
4431// If not specified, defaults to 0
4432func LogUniformCandidateSamplerSeed(value int64) LogUniformCandidateSamplerAttr {
4433	return func(m optionalAttr) {
4434		m["seed"] = value
4435	}
4436}
4437
4438// LogUniformCandidateSamplerSeed2 sets the optional seed2 attribute to value.
4439//
4440// value: A second seed to avoid seed collision.
4441// If not specified, defaults to 0
4442func LogUniformCandidateSamplerSeed2(value int64) LogUniformCandidateSamplerAttr {
4443	return func(m optionalAttr) {
4444		m["seed2"] = value
4445	}
4446}
4447
4448// Generates labels for candidate sampling with a log-uniform distribution.
4449//
4450// See explanations of candidate sampling and the data formats at
4451// go/candidate-sampling.
4452//
4453// For each batch, this op picks a single set of sampled candidate labels.
4454//
4455// The advantages of sampling candidates per-batch are simplicity and the
4456// possibility of efficient dense matrix multiplication. The disadvantage is that
4457// the sampled candidates must be chosen independently of the context and of the
4458// true labels.
4459//
4460// Arguments:
4461//	true_classes: A batch_size * num_true matrix, in which each row contains the
4462// IDs of the num_true target_classes in the corresponding original label.
4463//	num_true: Number of true labels per context.
4464//	num_sampled: Number of candidates to randomly sample.
4465//	unique: If unique is true, we sample with rejection, so that all sampled
4466// candidates in a batch are unique. This requires some approximation to
4467// estimate the post-rejection sampling probabilities.
4468//	range_max: The sampler will sample integers from the interval [0, range_max).
4469//
4470// Returns:
4471//	sampled_candidates: A vector of length num_sampled, in which each element is
4472// the ID of a sampled candidate.
4473//	true_expected_count: A batch_size * num_true matrix, representing
4474// the number of times each candidate is expected to occur in a batch
4475// of sampled candidates. If unique=true, then this is a probability.
4476//	sampled_expected_count: A vector of length num_sampled, for each sampled
4477// candidate representing the number of times the candidate is expected
4478// to occur in a batch of sampled candidates.  If unique=true, then this is a
4479// probability.
4480func LogUniformCandidateSampler(scope *Scope, true_classes tf.Output, num_true int64, num_sampled int64, unique bool, range_max int64, optional ...LogUniformCandidateSamplerAttr) (sampled_candidates tf.Output, true_expected_count tf.Output, sampled_expected_count tf.Output) {
4481	if scope.Err() != nil {
4482		return
4483	}
4484	attrs := map[string]interface{}{"num_true": num_true, "num_sampled": num_sampled, "unique": unique, "range_max": range_max}
4485	for _, a := range optional {
4486		a(attrs)
4487	}
4488	opspec := tf.OpSpec{
4489		Type: "LogUniformCandidateSampler",
4490		Input: []tf.Input{
4491			true_classes,
4492		},
4493		Attrs: attrs,
4494	}
4495	op := scope.AddOperation(opspec)
4496	return op.Output(0), op.Output(1), op.Output(2)
4497}
4498
4499// Selects the k nearest centers for each point.
4500//
4501// Rows of points are assumed to be input points. Rows of centers are assumed to be
4502// the list of candidate centers. For each point, the k centers that have the least L2
4503// distance to it are computed.
4504//
4505// Arguments:
4506//	points: Matrix of shape (n, d). Rows are assumed to be input points.
4507//	centers: Matrix of shape (m, d). Rows are assumed to be centers.
4508//	k: Number of nearest centers to return for each point. If k is larger than m, then
4509// only m centers are returned.
4510//
4511// Returns:
4512//	nearest_center_indices: Matrix of shape (n, min(m, k)). Each row contains the indices of the centers
4513// closest to the corresponding point, ordered by increasing distance.
4514//	nearest_center_distances: Matrix of shape (n, min(m, k)). Each row contains the squared L2 distance to the
4515// corresponding center in nearest_center_indices.
4516func NearestNeighbors(scope *Scope, points tf.Output, centers tf.Output, k tf.Output) (nearest_center_indices tf.Output, nearest_center_distances tf.Output) {
4517	if scope.Err() != nil {
4518		return
4519	}
4520	opspec := tf.OpSpec{
4521		Type: "NearestNeighbors",
4522		Input: []tf.Input{
4523			points, centers, k,
4524		},
4525	}
4526	op := scope.AddOperation(opspec)
4527	return op.Output(0), op.Output(1)
4528}
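
// A minimal usage sketch (illustrative; assumes NewScope and Const from this
// package):
//
// ```
// s := NewScope()
// points := Const(s, [][]float32{{0, 0}, {5, 5}})           // n=2, d=2
// centers := Const(s, [][]float32{{1, 1}, {4, 4}, {9, 9}})  // m=3, d=2
// k := Const(s, int64(2))
// indices, distances := NearestNeighbors(s, points, centers, k)
// // indices and distances both have shape (2, 2) == (n, min(m, k)).
// ```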
4529
4530// Selects num_to_sample rows of input using the KMeans++ criterion.
4531//
4532// Rows of points are assumed to be input points. One row is selected at random.
4533// Subsequent rows are sampled with probability proportional to the squared L2
4534// distance from the nearest row selected thus far, until num_to_sample rows have
4535// been sampled.
4536//
4537// Arguments:
4538//	points: Matrix of shape (n, d). Rows are assumed to be input points.
4539//	num_to_sample: Scalar. The number of rows to sample. This value must not be larger than n.
4540//	seed: Scalar. Seed for initializing the random number generator.
4541//	num_retries_per_sample: Scalar. For each row that is sampled, this parameter
4542// specifies the number of additional points to draw from the current
4543// distribution before selecting the best. If a negative value is specified, a
4544// heuristic is used to sample O(log(num_to_sample)) additional points.
4545//
4546// Returns Matrix of shape (num_to_sample, d). The sampled rows.
4547func KmeansPlusPlusInitialization(scope *Scope, points tf.Output, num_to_sample tf.Output, seed tf.Output, num_retries_per_sample tf.Output) (samples tf.Output) {
4548	if scope.Err() != nil {
4549		return
4550	}
4551	opspec := tf.OpSpec{
4552		Type: "KmeansPlusPlusInitialization",
4553		Input: []tf.Input{
4554			points, num_to_sample, seed, num_retries_per_sample,
4555		},
4556	}
4557	op := scope.AddOperation(opspec)
4558	return op.Output(0)
4559}
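
// A minimal usage sketch (illustrative; assumes NewScope and Const from this
// package):
//
// ```
// s := NewScope()
// points := Const(s, [][]float32{{0, 0}, {1, 1}, {8, 8}, {9, 9}})
// samples := KmeansPlusPlusInitialization(s, points,
// 	Const(s, int64(2)),  // num_to_sample
// 	Const(s, int64(7)),  // seed
// 	Const(s, int64(-1))) // num_retries_per_sample: negative => heuristic
// // samples has shape (2, 2) == (num_to_sample, d).
// ```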
4560
4561// CollectiveBcastRecvV2Attr is an optional argument to CollectiveBcastRecvV2.
4562type CollectiveBcastRecvV2Attr func(optionalAttr)
4563
4564// CollectiveBcastRecvV2CommunicationHint sets the optional communication_hint attribute to value.
4565// If not specified, defaults to "auto"
4566func CollectiveBcastRecvV2CommunicationHint(value string) CollectiveBcastRecvV2Attr {
4567	return func(m optionalAttr) {
4568		m["communication_hint"] = value
4569	}
4570}
4571
4572// CollectiveBcastRecvV2TimeoutSeconds sets the optional timeout_seconds attribute to value.
4573// If not specified, defaults to 0
4574func CollectiveBcastRecvV2TimeoutSeconds(value float32) CollectiveBcastRecvV2Attr {
4575	return func(m optionalAttr) {
4576		m["timeout_seconds"] = value
4577	}
4578}
4579
4580// Receives a tensor value broadcast from another device.
4581func CollectiveBcastRecvV2(scope *Scope, group_size tf.Output, group_key tf.Output, instance_key tf.Output, shape tf.Output, T tf.DataType, optional ...CollectiveBcastRecvV2Attr) (data tf.Output) {
4582	if scope.Err() != nil {
4583		return
4584	}
4585	attrs := map[string]interface{}{"T": T}
4586	for _, a := range optional {
4587		a(attrs)
4588	}
4589	opspec := tf.OpSpec{
4590		Type: "CollectiveBcastRecvV2",
4591		Input: []tf.Input{
4592			group_size, group_key, instance_key, shape,
4593		},
4594		Attrs: attrs,
4595	}
4596	op := scope.AddOperation(opspec)
4597	return op.Output(0)
4598}
4599
4600// AbortAttr is an optional argument to Abort.
4601type AbortAttr func(optionalAttr)
4602
4603// AbortErrorMsg sets the optional error_msg attribute to value.
4604//
4605// value: A string which is the message associated with the exception.
4606// If not specified, defaults to ""
4607func AbortErrorMsg(value string) AbortAttr {
4608	return func(m optionalAttr) {
4609		m["error_msg"] = value
4610	}
4611}
4612
4613// AbortExitWithoutError sets the optional exit_without_error attribute to value.
4614// If not specified, defaults to false
4615func AbortExitWithoutError(value bool) AbortAttr {
4616	return func(m optionalAttr) {
4617		m["exit_without_error"] = value
4618	}
4619}
4620
4621// Raises an exception to abort the process when called.
4622//
4623// If exit_without_error is true, the process will exit normally,
4624// otherwise it will exit with a SIGABRT signal.
4625//
4626// Returns nothing but an exception.
4627//
4628// Returns the created operation.
4629func Abort(scope *Scope, optional ...AbortAttr) (o *tf.Operation) {
4630	if scope.Err() != nil {
4631		return
4632	}
4633	attrs := map[string]interface{}{}
4634	for _, a := range optional {
4635		a(attrs)
4636	}
4637	opspec := tf.OpSpec{
4638		Type: "Abort",
4639
4640		Attrs: attrs,
4641	}
4642	return scope.AddOperation(opspec)
4643}
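
// A minimal usage sketch showing the functional-options pattern for optional
// attributes (illustrative; assumes NewScope from this package). Because Abort
// returns a *tf.Operation rather than a tf.Output, it is typically passed as a
// run target or used as a control dependency:
//
// ```
// s := NewScope()
// abort := Abort(s,
// 	AbortErrorMsg("unexpected state"),
// 	AbortExitWithoutError(false))
// _ = abort // e.g. supply in the targets slice of (*tf.Session).Run
// ```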
4644
4645// Makes its input available to the next iteration.
4646//
4647// Arguments:
4648//	data: The tensor to be made available to the next iteration.
4649//
4650// Returns The same tensor as `data`.
4651func NextIteration(scope *Scope, data tf.Output) (output tf.Output) {
4652	if scope.Err() != nil {
4653		return
4654	}
4655	opspec := tf.OpSpec{
4656		Type: "NextIteration",
4657		Input: []tf.Input{
4658			data,
4659		},
4660	}
4661	op := scope.AddOperation(opspec)
4662	return op.Output(0)
4663}
4664
4665// Exits the current frame to its parent frame.
4666//
4667// Exit makes its input `data` available to the parent frame.
4668//
4669// Arguments:
4670//	data: The tensor to be made available to the parent frame.
4671//
4672// Returns The same tensor as `data`.
4673func Exit(scope *Scope, data tf.Output) (output tf.Output) {
4674	if scope.Err() != nil {
4675		return
4676	}
4677	opspec := tf.OpSpec{
4678		Type: "Exit",
4679		Input: []tf.Input{
4680			data,
4681		},
4682	}
4683	op := scope.AddOperation(opspec)
4684	return op.Output(0)
4685}
4686
4687// EnterAttr is an optional argument to Enter.
4688type EnterAttr func(optionalAttr)
4689
4690// EnterIsConstant sets the optional is_constant attribute to value.
4691//
4692// value: If true, the output is constant within the child frame.
4693// If not specified, defaults to false
4694func EnterIsConstant(value bool) EnterAttr {
4695	return func(m optionalAttr) {
4696		m["is_constant"] = value
4697	}
4698}
4699
4700// EnterParallelIterations sets the optional parallel_iterations attribute to value.
4701//
4702// value: The number of iterations allowed to run in parallel.
4703// If not specified, defaults to 10
4704func EnterParallelIterations(value int64) EnterAttr {
4705	return func(m optionalAttr) {
4706		m["parallel_iterations"] = value
4707	}
4708}
4709
4710// Creates or finds a child frame, and makes `data` available to the child frame.
4711//
4712// This op is used together with `Exit` to create loops in the graph.
4713// The unique `frame_name` is used by the `Executor` to identify frames. If
4714// `is_constant` is true, `output` is a constant in the child frame; otherwise
4715// it may be changed in the child frame. At most `parallel_iterations` iterations
4716// are run in parallel in the child frame.
4717//
4718// Arguments:
4719//	data: The tensor to be made available to the child frame.
4720//	frame_name: The name of the child frame.
4721//
4722// Returns The same tensor as `data`.
4723func Enter(scope *Scope, data tf.Output, frame_name string, optional ...EnterAttr) (output tf.Output) {
4724	if scope.Err() != nil {
4725		return
4726	}
4727	attrs := map[string]interface{}{"frame_name": frame_name}
4728	for _, a := range optional {
4729		a(attrs)
4730	}
4731	opspec := tf.OpSpec{
4732		Type: "Enter",
4733		Input: []tf.Input{
4734			data,
4735		},
4736		Attrs: attrs,
4737	}
4738	op := scope.AddOperation(opspec)
4739	return op.Output(0)
4740}
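
// A structural sketch of how the frame ops above connect (illustrative only;
// a functional graph loop additionally needs Merge, Switch, and LoopCond,
// which are omitted here. This only shows Enter feeding a child frame whose
// value cycles through NextIteration and leaves through Exit):
//
// ```
// s := NewScope()
// x := Const(s, int32(0))
// entered := Enter(s, x, "my_loop", EnterParallelIterations(4))
// // ... per-iteration computation on `entered` would go here ...
// next := NextIteration(s, entered)
// result := Exit(s, next)
// _ = result
// ```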
4741
4742// DenseCountSparseOutputAttr is an optional argument to DenseCountSparseOutput.
4743type DenseCountSparseOutputAttr func(optionalAttr)
4744
4745// DenseCountSparseOutputMinlength sets the optional minlength attribute to value.
4746//
4747// value: Minimum value to count. Can be set to -1 for no minimum.
4748// If not specified, defaults to -1
4749//
4750// REQUIRES: value >= -1
4751func DenseCountSparseOutputMinlength(value int64) DenseCountSparseOutputAttr {
4752	return func(m optionalAttr) {
4753		m["minlength"] = value
4754	}
4755}
4756
4757// DenseCountSparseOutputMaxlength sets the optional maxlength attribute to value.
4758//
4759// value: Maximum value to count. Can be set to -1 for no maximum.
4760// If not specified, defaults to -1
4761//
4762// REQUIRES: value >= -1
4763func DenseCountSparseOutputMaxlength(value int64) DenseCountSparseOutputAttr {
4764	return func(m optionalAttr) {
4765		m["maxlength"] = value
4766	}
4767}
4768
4769// Performs sparse-output bin counting for a tf.Tensor input.
4770//
4771//   Counts the number of times each value occurs in the input.
4772//
4773// Arguments:
4774//	values: Tensor containing data to count.
4775//	weights: A Tensor of the same shape as indices containing per-index weight values. May
4776// also be the empty tensor if no weights are used.
4777//	binary_output: Whether to output the number of occurrences of each value or 1.
4778//
4779// Returns:
4780//	output_indices: Indices tensor for the resulting sparse tensor object.
4781//	output_values: Values tensor for the resulting sparse tensor object.
4782//	output_dense_shape: Shape tensor for the resulting sparse tensor object.
4783func DenseCountSparseOutput(scope *Scope, values tf.Output, weights tf.Output, binary_output bool, optional ...DenseCountSparseOutputAttr) (output_indices tf.Output, output_values tf.Output, output_dense_shape tf.Output) {
4784	if scope.Err() != nil {
4785		return
4786	}
4787	attrs := map[string]interface{}{"binary_output": binary_output}
4788	for _, a := range optional {
4789		a(attrs)
4790	}
4791	opspec := tf.OpSpec{
4792		Type: "DenseCountSparseOutput",
4793		Input: []tf.Input{
4794			values, weights,
4795		},
4796		Attrs: attrs,
4797	}
4798	op := scope.AddOperation(opspec)
4799	return op.Output(0), op.Output(1), op.Output(2)
4800}
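
// A minimal usage sketch (illustrative; assumes NewScope and Const from this
// package; an empty weights tensor requests unweighted counts, per the
// argument docs above):
//
// ```
// s := NewScope()
// values := Const(s, []int32{1, 2, 2, 5})
// weights := Const(s, []float32{}) // empty tensor: no weights
// indices, counts, shape := DenseCountSparseOutput(s, values, weights,
// 	false, // binary_output: report occurrence counts, not 0/1
// 	DenseCountSparseOutputMaxlength(10))
// ```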
4801
4802// CTCBeamSearchDecoderAttr is an optional argument to CTCBeamSearchDecoder.
4803type CTCBeamSearchDecoderAttr func(optionalAttr)
4804
4805// CTCBeamSearchDecoderMergeRepeated sets the optional merge_repeated attribute to value.
4806//
4807// value: If true, merge repeated classes in output.
4808// If not specified, defaults to true
4809func CTCBeamSearchDecoderMergeRepeated(value bool) CTCBeamSearchDecoderAttr {
4810	return func(m optionalAttr) {
4811		m["merge_repeated"] = value
4812	}
4813}
4814
4815// Performs beam search decoding on the logits given in input.
4816//
4817// A note about the attribute merge_repeated: For the beam search decoder,
4818// this means that if consecutive entries in a beam are the same, only
4819// the first of these is emitted.  That is, when the top path is "A B B B B",
4820// "A B" is returned if merge_repeated = True but "A B B B B" is
4821// returned if merge_repeated = False.
4822//
4823// Arguments:
4824//	inputs: 3-D, shape: `(max_time x batch_size x num_classes)`, the logits.
4825//	sequence_length: A vector containing sequence lengths, size `(batch)`.
4826//	beam_width: A scalar >= 0 (beam search beam width).
4827//	top_paths: A scalar >= 0, <= beam_width (controls output size).
4828//
4829// Returns:
4830//	decoded_indices: A list (length: top_paths) of indices matrices.  Matrix j,
4831// size `(total_decoded_outputs[j] x 2)`, has indices of a
4832// `SparseTensor<int64, 2>`.  The rows store: [batch, time].
4833//	decoded_values: A list (length: top_paths) of values vectors.  Vector j,
4834// size `(length total_decoded_outputs[j])`, has the values of a
4835// `SparseTensor<int64, 2>`.  The vector stores the decoded classes for beam j.
4836//	decoded_shape: A list (length: top_paths) of shape vectors.  Vector j,
4837// size `(2)`, stores the shape of the decoded `SparseTensor[j]`.
4838// Its values are: `[batch_size, max_decoded_length[j]]`.
4839//	log_probability: A matrix, shaped: `(batch_size x top_paths)`.  The
4840// sequence log-probabilities.
4841func CTCBeamSearchDecoder(scope *Scope, inputs tf.Output, sequence_length tf.Output, beam_width int64, top_paths int64, optional ...CTCBeamSearchDecoderAttr) (decoded_indices []tf.Output, decoded_values []tf.Output, decoded_shape []tf.Output, log_probability tf.Output) {
4842	if scope.Err() != nil {
4843		return
4844	}
4845	attrs := map[string]interface{}{"beam_width": beam_width, "top_paths": top_paths}
4846	for _, a := range optional {
4847		a(attrs)
4848	}
4849	opspec := tf.OpSpec{
4850		Type: "CTCBeamSearchDecoder",
4851		Input: []tf.Input{
4852			inputs, sequence_length,
4853		},
4854		Attrs: attrs,
4855	}
4856	op := scope.AddOperation(opspec)
4857	if scope.Err() != nil {
4858		return
4859	}
4860	var idx int
4861	var err error
4862	if decoded_indices, idx, err = makeOutputList(op, idx, "decoded_indices"); err != nil {
4863		scope.UpdateErr("CTCBeamSearchDecoder", err)
4864		return
4865	}
4866	if decoded_values, idx, err = makeOutputList(op, idx, "decoded_values"); err != nil {
4867		scope.UpdateErr("CTCBeamSearchDecoder", err)
4868		return
4869	}
4870	if decoded_shape, idx, err = makeOutputList(op, idx, "decoded_shape"); err != nil {
4871		scope.UpdateErr("CTCBeamSearchDecoder", err)
4872		return
4873	}
4874	log_probability = op.Output(idx)
4875	return decoded_indices, decoded_values, decoded_shape, log_probability
4876}
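
// A minimal usage sketch (illustrative; assumes NewScope, Placeholder, and
// tf.MakeShape from the tensorflow/go packages; shapes follow the argument
// docs above):
//
// ```
// s := NewScope()
// logits := Placeholder(s, tf.Float,
// 	PlaceholderShape(tf.MakeShape(50, 8, 10))) // (max_time, batch_size, num_classes)
// seqLen := Placeholder(s, tf.Int32, PlaceholderShape(tf.MakeShape(8)))
// indices, values, shapes, logProb := CTCBeamSearchDecoder(s, logits, seqLen,
// 	16, // beam_width
// 	1,  // top_paths
// 	CTCBeamSearchDecoderMergeRepeated(true))
// // indices, values, and shapes are []tf.Output slices of length top_paths.
// ```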
4877
4878// CTCGreedyDecoderAttr is an optional argument to CTCGreedyDecoder.
4879type CTCGreedyDecoderAttr func(optionalAttr)
4880
4881// CTCGreedyDecoderMergeRepeated sets the optional merge_repeated attribute to value.
4882//
4883// value: If True, merge repeated classes in output.
4884// If not specified, defaults to false
4885func CTCGreedyDecoderMergeRepeated(value bool) CTCGreedyDecoderAttr {
4886	return func(m optionalAttr) {
4887		m["merge_repeated"] = value
4888	}
4889}
4890
4891// Performs greedy decoding on the logits given in inputs.
4892//
4893// A note about the attribute merge_repeated: if enabled, when
4894// consecutive logits' maximum indices are the same, only the first of
4895// these is emitted.  Labeling the blank '*', the sequence "A B B * B B"
4896// becomes "A B B" if merge_repeated = True and "A B B B B" if
4897// merge_repeated = False.
4898//
4899// Regardless of the value of merge_repeated, if the maximum index of a given
4900// time and batch corresponds to the blank, index `(num_classes - 1)`, no new
4901// element is emitted.
4902//
4903// Arguments:
4904//	inputs: 3-D, shape: `(max_time x batch_size x num_classes)`, the logits.
4905//	sequence_length: A vector containing sequence lengths, size `(batch_size)`.
4906//
4907// Returns:
4908//	decoded_indices: Indices matrix, size `(total_decoded_outputs x 2)`,
4909// of a `SparseTensor<int64, 2>`.  The rows store: [batch, time].
4910//	decoded_values: Values vector, size: `(total_decoded_outputs)`,
4911// of a `SparseTensor<int64, 2>`.  The vector stores the decoded classes.
4912//	decoded_shape: Shape vector, size `(2)`, of the decoded SparseTensor.
4913// Values are: `[batch_size, max_decoded_length]`.
4914//	log_probability: Matrix, size `(batch_size x 1)`, containing sequence
4915// log-probabilities.
4916func CTCGreedyDecoder(scope *Scope, inputs tf.Output, sequence_length tf.Output, optional ...CTCGreedyDecoderAttr) (decoded_indices tf.Output, decoded_values tf.Output, decoded_shape tf.Output, log_probability tf.Output) {
4917	if scope.Err() != nil {
4918		return
4919	}
4920	attrs := map[string]interface{}{}
4921	for _, a := range optional {
4922		a(attrs)
4923	}
4924	opspec := tf.OpSpec{
4925		Type: "CTCGreedyDecoder",
4926		Input: []tf.Input{
4927			inputs, sequence_length,
4928		},
4929		Attrs: attrs,
4930	}
4931	op := scope.AddOperation(opspec)
4932	return op.Output(0), op.Output(1), op.Output(2), op.Output(3)
4933}
4934
4935// CTCLossAttr is an optional argument to CTCLoss.
4936type CTCLossAttr func(optionalAttr)
4937
4938// CTCLossPreprocessCollapseRepeated sets the optional preprocess_collapse_repeated attribute to value.
4939//
4940// value: Scalar, if true then repeated labels are
4941// collapsed prior to the CTC calculation.
4942// If not specified, defaults to false
4943func CTCLossPreprocessCollapseRepeated(value bool) CTCLossAttr {
4944	return func(m optionalAttr) {
4945		m["preprocess_collapse_repeated"] = value
4946	}
4947}
4948
4949// CTCLossCtcMergeRepeated sets the optional ctc_merge_repeated attribute to value.
4950//
4951// value: Scalar.  If set to false, *during* CTC calculation
4952// repeated non-blank labels will not be merged and are interpreted as
4953// individual labels.  This is a simplified version of CTC.
4954// If not specified, defaults to true
4955func CTCLossCtcMergeRepeated(value bool) CTCLossAttr {
4956	return func(m optionalAttr) {
4957		m["ctc_merge_repeated"] = value
4958	}
4959}
4960
4961// CTCLossIgnoreLongerOutputsThanInputs sets the optional ignore_longer_outputs_than_inputs attribute to value.
4962//
4963// value: Scalar. If set to true, during CTC
4964// calculation, items that have longer output sequences than input sequences
4965// are skipped: they don't contribute to the loss term and have zero-gradient.
4966// If not specified, defaults to false
4967func CTCLossIgnoreLongerOutputsThanInputs(value bool) CTCLossAttr {
4968	return func(m optionalAttr) {
4969		m["ignore_longer_outputs_than_inputs"] = value
4970	}
4971}
4972
4973// Calculates the CTC Loss (log probability) for each batch entry.  Also calculates
4974//
4975// the gradient.  This op performs the softmax operation for you, so inputs
4976// should be, e.g., linear projections of outputs from an LSTM.
4977//
4978// Arguments:
4979//	inputs: 3-D, shape: `(max_time x batch_size x num_classes)`, the logits.
4980//	labels_indices: The indices of a `SparseTensor<int32, 2>`.
4981// `labels_indices(i, :) == [b, t]` means `labels_values(i)` stores the id for
4982// `(batch b, time t)`.
4983//	labels_values: The values (labels) associated with the given batch and time.
4984//	sequence_length: A vector containing sequence lengths (batch).
4985//
4986// Returns:
4987//	loss: A vector (batch) containing log-probabilities.
4988//	gradient: The gradient of `loss`.  3-D, shape:
4989// `(max_time x batch_size x num_classes)`.
4990func CTCLoss(scope *Scope, inputs tf.Output, labels_indices tf.Output, labels_values tf.Output, sequence_length tf.Output, optional ...CTCLossAttr) (loss tf.Output, gradient tf.Output) {
4991	if scope.Err() != nil {
4992		return
4993	}
4994	attrs := map[string]interface{}{}
4995	for _, a := range optional {
4996		a(attrs)
4997	}
4998	opspec := tf.OpSpec{
4999		Type: "CTCLoss",
5000		Input: []tf.Input{
5001			inputs, labels_indices, labels_values, sequence_length,
5002		},
5003		Attrs: attrs,
5004	}
5005	op := scope.AddOperation(opspec)
5006	return op.Output(0), op.Output(1)
5007}
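
// A minimal usage sketch (illustrative; assumes NewScope and Placeholder from
// this package; dtypes follow the argument docs above: int64 label indices,
// int32 label values and sequence lengths):
//
// ```
// s := NewScope()
// inputs := Placeholder(s, tf.Float)        // (max_time, batch_size, num_classes) logits
// labelsIndices := Placeholder(s, tf.Int64) // SparseTensor<int32, 2> indices
// labelsValues := Placeholder(s, tf.Int32)  // label ids
// seqLen := Placeholder(s, tf.Int32)        // (batch_size) lengths
// loss, gradient := CTCLoss(s, inputs, labelsIndices, labelsValues, seqLen,
// 	CTCLossIgnoreLongerOutputsThanInputs(true))
// ```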
5008
5009// CudnnRNNCanonicalToParamsAttr is an optional argument to CudnnRNNCanonicalToParams.
5010type CudnnRNNCanonicalToParamsAttr func(optionalAttr)
5011
5012// CudnnRNNCanonicalToParamsRnnMode sets the optional rnn_mode attribute to value.
5013// If not specified, defaults to "lstm"
5014func CudnnRNNCanonicalToParamsRnnMode(value string) CudnnRNNCanonicalToParamsAttr {
5015	return func(m optionalAttr) {
5016		m["rnn_mode"] = value
5017	}
5018}
5019
5020// CudnnRNNCanonicalToParamsInputMode sets the optional input_mode attribute to value.
5021// If not specified, defaults to "linear_input"
5022func CudnnRNNCanonicalToParamsInputMode(value string) CudnnRNNCanonicalToParamsAttr {
5023	return func(m optionalAttr) {
5024		m["input_mode"] = value
5025	}
5026}
5027
5028// CudnnRNNCanonicalToParamsDirection sets the optional direction attribute to value.
5029// If not specified, defaults to "unidirectional"
5030func CudnnRNNCanonicalToParamsDirection(value string) CudnnRNNCanonicalToParamsAttr {
5031	return func(m optionalAttr) {
5032		m["direction"] = value
5033	}
5034}
5035
5036// CudnnRNNCanonicalToParamsDropout sets the optional dropout attribute to value.
5037// If not specified, defaults to 0
5038func CudnnRNNCanonicalToParamsDropout(value float32) CudnnRNNCanonicalToParamsAttr {
5039	return func(m optionalAttr) {
5040		m["dropout"] = value
5041	}
5042}
5043
5044// CudnnRNNCanonicalToParamsSeed sets the optional seed attribute to value.
5045// If not specified, defaults to 0
5046func CudnnRNNCanonicalToParamsSeed(value int64) CudnnRNNCanonicalToParamsAttr {
5047	return func(m optionalAttr) {
5048		m["seed"] = value
5049	}
5050}
5051
5052// CudnnRNNCanonicalToParamsSeed2 sets the optional seed2 attribute to value.
5053// If not specified, defaults to 0
5054func CudnnRNNCanonicalToParamsSeed2(value int64) CudnnRNNCanonicalToParamsAttr {
5055	return func(m optionalAttr) {
5056		m["seed2"] = value
5057	}
5058}
5059
5060// Converts CudnnRNN params from canonical form to usable form.
5061//
5062// Writes a set of weights into the opaque params buffer so they can be used in
5063// upcoming training or inferences.
5064//
5065// Note that the params buffer may not be compatible across different GPUs. So any
5066// save and restoration should be converted to and from the canonical weights and
5067// biases.
5068//
5069// num_layers: Specifies the number of layers in the RNN model.
5070// num_units: Specifies the size of the hidden state.
5071// input_size: Specifies the size of the input state.
5072// weights: the canonical form of weights that can be used for saving
5073//     and restoration. They are more likely to be compatible across different
5074//     generations.
5075// biases: the canonical form of biases that can be used for saving
5076//     and restoration. They are more likely to be compatible across different
5077//     generations.
5078// num_params: number of parameter sets for all layers.
5079//     Each layer may contain multiple parameter sets, with each set consisting of
5080//     a weight matrix and a bias vector.
5081// rnn_mode: Indicates the type of the RNN model.
5082// input_mode: Indicates whether there is a linear projection between the input and
5083//     the actual computation before the first layer. 'skip_input' is only allowed
5084//     when input_size == num_units; 'auto_select' implies 'skip_input' when
5085//     input_size == num_units; otherwise, it implies 'linear_input'.
5086// direction: Indicates whether a bidirectional model will be used.
5087//     dir = (direction == bidirectional) ? 2 : 1
5088// dropout: dropout probability. When set to 0., dropout is disabled.
5089// seed: the 1st part of a seed to initialize dropout.
5090// seed2: the 2nd part of a seed to initialize dropout.
5091func CudnnRNNCanonicalToParams(scope *Scope, num_layers tf.Output, num_units tf.Output, input_size tf.Output, weights []tf.Output, biases []tf.Output, optional ...CudnnRNNCanonicalToParamsAttr) (params tf.Output) {
5092	if scope.Err() != nil {
5093		return
5094	}
5095	attrs := map[string]interface{}{}
5096	for _, a := range optional {
5097		a(attrs)
5098	}
5099	opspec := tf.OpSpec{
5100		Type: "CudnnRNNCanonicalToParams",
5101		Input: []tf.Input{
5102			num_layers, num_units, input_size, tf.OutputList(weights), tf.OutputList(biases),
5103		},
5104		Attrs: attrs,
5105	}
5106	op := scope.AddOperation(opspec)
5107	return op.Output(0)
5108}
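
// A minimal usage sketch (illustrative; `weights` and `biases` stand for
// []tf.Output slices in canonical form, e.g. as returned by
// CudnnRNNParamsToCanonical below, and the op needs a CUDA-enabled build to
// actually execute):
//
// ```
// s := NewScope()
// params := CudnnRNNCanonicalToParams(s,
// 	Const(s, int32(1)),   // num_layers
// 	Const(s, int32(128)), // num_units
// 	Const(s, int32(64)),  // input_size
// 	weights, biases,
// 	CudnnRNNCanonicalToParamsRnnMode("lstm"),
// 	CudnnRNNCanonicalToParamsDirection("unidirectional"))
// ```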
5109
5110// CudnnRNNParamsToCanonicalV2Attr is an optional argument to CudnnRNNParamsToCanonicalV2.
5111type CudnnRNNParamsToCanonicalV2Attr func(optionalAttr)
5112
5113// CudnnRNNParamsToCanonicalV2RnnMode sets the optional rnn_mode attribute to value.
5114// If not specified, defaults to "lstm"
5115func CudnnRNNParamsToCanonicalV2RnnMode(value string) CudnnRNNParamsToCanonicalV2Attr {
5116	return func(m optionalAttr) {
5117		m["rnn_mode"] = value
5118	}
5119}
5120
5121// CudnnRNNParamsToCanonicalV2InputMode sets the optional input_mode attribute to value.
5122// If not specified, defaults to "linear_input"
5123func CudnnRNNParamsToCanonicalV2InputMode(value string) CudnnRNNParamsToCanonicalV2Attr {
5124	return func(m optionalAttr) {
5125		m["input_mode"] = value
5126	}
5127}
5128
5129// CudnnRNNParamsToCanonicalV2Direction sets the optional direction attribute to value.
5130// If not specified, defaults to "unidirectional"
5131func CudnnRNNParamsToCanonicalV2Direction(value string) CudnnRNNParamsToCanonicalV2Attr {
5132	return func(m optionalAttr) {
5133		m["direction"] = value
5134	}
5135}
5136
5137// CudnnRNNParamsToCanonicalV2Dropout sets the optional dropout attribute to value.
5138// If not specified, defaults to 0
5139func CudnnRNNParamsToCanonicalV2Dropout(value float32) CudnnRNNParamsToCanonicalV2Attr {
5140	return func(m optionalAttr) {
5141		m["dropout"] = value
5142	}
5143}
5144
5145// CudnnRNNParamsToCanonicalV2Seed sets the optional seed attribute to value.
5146// If not specified, defaults to 0
5147func CudnnRNNParamsToCanonicalV2Seed(value int64) CudnnRNNParamsToCanonicalV2Attr {
5148	return func(m optionalAttr) {
5149		m["seed"] = value
5150	}
5151}
5152
5153// CudnnRNNParamsToCanonicalV2Seed2 sets the optional seed2 attribute to value.
5154// If not specified, defaults to 0
5155func CudnnRNNParamsToCanonicalV2Seed2(value int64) CudnnRNNParamsToCanonicalV2Attr {
5156	return func(m optionalAttr) {
5157		m["seed2"] = value
5158	}
5159}
5160
5161// CudnnRNNParamsToCanonicalV2NumProj sets the optional num_proj attribute to value.
5162// If not specified, defaults to 0
5163func CudnnRNNParamsToCanonicalV2NumProj(value int64) CudnnRNNParamsToCanonicalV2Attr {
5164	return func(m optionalAttr) {
5165		m["num_proj"] = value
5166	}
5167}
5168
5169// Retrieves CudnnRNN params in canonical form. It supports the projection in LSTM.
5170//
5171// Retrieves a set of weights from the opaque params buffer that can be saved and
5172// restored in a way compatible with future runs.
5173//
5174// Note that the params buffer may not be compatible across different GPUs. So any
5175// save and restoration should be converted to and from the canonical weights and
5176// biases.
5177//
5178// num_layers: Specifies the number of layers in the RNN model.
5179// num_units: Specifies the size of the hidden state.
5180// input_size: Specifies the size of the input state.
5181// num_params_weights: number of weight parameter matrices for all layers.
5182// num_params_biases: number of bias parameter vectors for all layers.
5183// weights: the canonical form of weights that can be used for saving
5184//     and restoration. They are more likely to be compatible across different
5185//     generations.
5186// biases: the canonical form of biases that can be used for saving
5187//     and restoration. They are more likely to be compatible across different
5188//     generations.
5189// rnn_mode: Indicates the type of the RNN model.
5190// input_mode: Indicates whether there is a linear projection between the input and
5191//     the actual computation before the first layer. 'skip_input' is only allowed
5192//     when input_size == num_units; 'auto_select' implies 'skip_input' when
5193//     input_size == num_units; otherwise, it implies 'linear_input'.
5194// direction: Indicates whether a bidirectional model will be used.
5195//     dir = (direction == bidirectional) ? 2 : 1
5196// dropout: dropout probability. When set to 0., dropout is disabled.
5197// seed: the 1st part of a seed to initialize dropout.
5198// seed2: the 2nd part of a seed to initialize dropout.
5199// num_proj: The output dimensionality for the projection matrices. If None or 0,
5200//     no projection is performed.
5201func CudnnRNNParamsToCanonicalV2(scope *Scope, num_layers tf.Output, num_units tf.Output, input_size tf.Output, params tf.Output, num_params_weights int64, num_params_biases int64, optional ...CudnnRNNParamsToCanonicalV2Attr) (weights []tf.Output, biases []tf.Output) {
5202	if scope.Err() != nil {
5203		return
5204	}
5205	attrs := map[string]interface{}{"num_params_weights": num_params_weights, "num_params_biases": num_params_biases}
5206	for _, a := range optional {
5207		a(attrs)
5208	}
5209	opspec := tf.OpSpec{
5210		Type: "CudnnRNNParamsToCanonicalV2",
5211		Input: []tf.Input{
5212			num_layers, num_units, input_size, params,
5213		},
5214		Attrs: attrs,
5215	}
5216	op := scope.AddOperation(opspec)
5217	if scope.Err() != nil {
5218		return
5219	}
5220	var idx int
5221	var err error
5222	if weights, idx, err = makeOutputList(op, idx, "weights"); err != nil {
5223		scope.UpdateErr("CudnnRNNParamsToCanonicalV2", err)
5224		return
5225	}
5226	if biases, idx, err = makeOutputList(op, idx, "biases"); err != nil {
5227		scope.UpdateErr("CudnnRNNParamsToCanonicalV2", err)
5228		return
5229	}
5230	return weights, biases
5231}
5232
5233// Returns the diagonal part of the tensor.
5234//
5235// This operation returns a tensor with the `diagonal` part
5236// of the `input`. The `diagonal` part is computed as follows:
5237//
5238// Assume `input` has dimensions `[D1,..., Dk, D1,..., Dk]`, then the output is a
5239// tensor of rank `k` with dimensions `[D1,..., Dk]` where:
5240//
5241// `diagonal[i1,..., ik] = input[i1, ..., ik, i1,..., ik]`.
5242//
5243// For example:
5244//
5245// ```
5246// # 'input' is [[1, 0, 0, 0]
5247//               [0, 2, 0, 0]
5248//               [0, 0, 3, 0]
5249//               [0, 0, 0, 4]]
5250//
5251// tf.diag_part(input) ==> [1, 2, 3, 4]
5252// ```
5253//
5254// Arguments:
5255//	input: Rank k tensor where k is even and not zero.
5256//
5257// Returns The extracted diagonal.
5258func DiagPart(scope *Scope, input tf.Output) (diagonal tf.Output) {
5259	if scope.Err() != nil {
5260		return
5261	}
5262	opspec := tf.OpSpec{
5263		Type: "DiagPart",
5264		Input: []tf.Input{
5265			input,
5266		},
5267	}
5268	op := scope.AddOperation(opspec)
5269	return op.Output(0)
5270}
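
// A minimal usage sketch (illustrative; assumes NewScope and Const from this
// package, plus graph execution via the parent tensorflow/go package):
//
// ```
// s := NewScope()
// input := Const(s, [][]int32{
// 	{1, 0},
// 	{0, 2},
// }) // rank 2 == 2*k with k=1, dims [2, 2]
// diag := DiagPart(s, input)  // => [1, 2]
// graph, err := s.Finalize()  // then fetch `diag` in a tf.Session
// ```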
5271
5272// CudnnRNNParamsToCanonicalAttr is an optional argument to CudnnRNNParamsToCanonical.
5273type CudnnRNNParamsToCanonicalAttr func(optionalAttr)
5274
5275// CudnnRNNParamsToCanonicalRnnMode sets the optional rnn_mode attribute to value.
5276// If not specified, defaults to "lstm"
5277func CudnnRNNParamsToCanonicalRnnMode(value string) CudnnRNNParamsToCanonicalAttr {
5278	return func(m optionalAttr) {
5279		m["rnn_mode"] = value
5280	}
5281}
5282
5283// CudnnRNNParamsToCanonicalInputMode sets the optional input_mode attribute to value.
5284// If not specified, defaults to "linear_input"
5285func CudnnRNNParamsToCanonicalInputMode(value string) CudnnRNNParamsToCanonicalAttr {
5286	return func(m optionalAttr) {
5287		m["input_mode"] = value
5288	}
5289}
5290
5291// CudnnRNNParamsToCanonicalDirection sets the optional direction attribute to value.
5292// If not specified, defaults to "unidirectional"
5293func CudnnRNNParamsToCanonicalDirection(value string) CudnnRNNParamsToCanonicalAttr {
5294	return func(m optionalAttr) {
5295		m["direction"] = value
5296	}
5297}
5298
5299// CudnnRNNParamsToCanonicalDropout sets the optional dropout attribute to value.
5300// If not specified, defaults to 0
5301func CudnnRNNParamsToCanonicalDropout(value float32) CudnnRNNParamsToCanonicalAttr {
5302	return func(m optionalAttr) {
5303		m["dropout"] = value
5304	}
5305}
5306
5307// CudnnRNNParamsToCanonicalSeed sets the optional seed attribute to value.
5308// If not specified, defaults to 0
5309func CudnnRNNParamsToCanonicalSeed(value int64) CudnnRNNParamsToCanonicalAttr {
5310	return func(m optionalAttr) {
5311		m["seed"] = value
5312	}
5313}
5314
5315// CudnnRNNParamsToCanonicalSeed2 sets the optional seed2 attribute to value.
5316// If not specified, defaults to 0
5317func CudnnRNNParamsToCanonicalSeed2(value int64) CudnnRNNParamsToCanonicalAttr {
5318	return func(m optionalAttr) {
5319		m["seed2"] = value
5320	}
5321}
5322
5323// Retrieves CudnnRNN params in canonical form.
5324//
5325// Retrieves a set of weights from the opaque params buffer that can be saved and
5326// restored in a way compatible with future runs.
5327//
5328// Note that the params buffer may not be compatible across different GPUs. So any
5329// save and restoration should be converted to and from the canonical weights and
5330// biases.
5331//
5332// num_layers: Specifies the number of layers in the RNN model.
5333// num_units: Specifies the size of the hidden state.
5334// input_size: Specifies the size of the input state.
5335// num_params: number of parameter sets for all layers.
5336//     Each layer may contain multiple parameter sets, with each set consisting of
5337//     a weight matrix and a bias vector.
5338// weights: the canonical form of weights that can be used for saving
5339//     and restoration. They are more likely to be compatible across different
5340//     generations.
5341// biases: the canonical form of biases that can be used for saving
5342//     and restoration. They are more likely to be compatible across different
5343//     generations.
5344// rnn_mode: Indicates the type of the RNN model.
5345// input_mode: Indicates whether there is a linear projection between the input and
5346//     the actual computation before the first layer. 'skip_input' is only allowed
5347//     when input_size == num_units; 'auto_select' implies 'skip_input' when
5348//     input_size == num_units; otherwise, it implies 'linear_input'.
5349// direction: Indicates whether a bidirectional model will be used.
5350//     dir = (direction == bidirectional) ? 2 : 1
5351// dropout: dropout probability. When set to 0., dropout is disabled.
5352// seed: the 1st part of a seed to initialize dropout.
5353// seed2: the 2nd part of a seed to initialize dropout.
5354func CudnnRNNParamsToCanonical(scope *Scope, num_layers tf.Output, num_units tf.Output, input_size tf.Output, params tf.Output, num_params int64, optional ...CudnnRNNParamsToCanonicalAttr) (weights []tf.Output, biases []tf.Output) {
5355	if scope.Err() != nil {
5356		return
5357	}
5358	attrs := map[string]interface{}{"num_params": num_params}
5359	for _, a := range optional {
5360		a(attrs)
5361	}
5362	opspec := tf.OpSpec{
5363		Type: "CudnnRNNParamsToCanonical",
5364		Input: []tf.Input{
5365			num_layers, num_units, input_size, params,
5366		},
5367		Attrs: attrs,
5368	}
5369	op := scope.AddOperation(opspec)
5370	if scope.Err() != nil {
5371		return
5372	}
5373	var idx int
5374	var err error
5375	if weights, idx, err = makeOutputList(op, idx, "weights"); err != nil {
5376		scope.UpdateErr("CudnnRNNParamsToCanonical", err)
5377		return
5378	}
5379	if biases, idx, err = makeOutputList(op, idx, "biases"); err != nil {
5380		scope.UpdateErr("CudnnRNNParamsToCanonical", err)
5381		return
5382	}
5383	return weights, biases
5384}
5385
5386// CudnnRNNBackpropV3Attr is an optional argument to CudnnRNNBackpropV3.
5387type CudnnRNNBackpropV3Attr func(optionalAttr)
5388
5389// CudnnRNNBackpropV3RnnMode sets the optional rnn_mode attribute to value.
5390// If not specified, defaults to "lstm"
5391func CudnnRNNBackpropV3RnnMode(value string) CudnnRNNBackpropV3Attr {
5392	return func(m optionalAttr) {
5393		m["rnn_mode"] = value
5394	}
5395}
5396
5397// CudnnRNNBackpropV3InputMode sets the optional input_mode attribute to value.
5398// If not specified, defaults to "linear_input"
5399func CudnnRNNBackpropV3InputMode(value string) CudnnRNNBackpropV3Attr {
5400	return func(m optionalAttr) {
5401		m["input_mode"] = value
5402	}
5403}
5404
5405// CudnnRNNBackpropV3Direction sets the optional direction attribute to value.
5406// If not specified, defaults to "unidirectional"
5407func CudnnRNNBackpropV3Direction(value string) CudnnRNNBackpropV3Attr {
5408	return func(m optionalAttr) {
5409		m["direction"] = value
5410	}
5411}
5412
5413// CudnnRNNBackpropV3Dropout sets the optional dropout attribute to value.
5414// If not specified, defaults to 0
5415func CudnnRNNBackpropV3Dropout(value float32) CudnnRNNBackpropV3Attr {
5416	return func(m optionalAttr) {
5417		m["dropout"] = value
5418	}
5419}
5420
5421// CudnnRNNBackpropV3Seed sets the optional seed attribute to value.
5422// If not specified, defaults to 0
5423func CudnnRNNBackpropV3Seed(value int64) CudnnRNNBackpropV3Attr {
5424	return func(m optionalAttr) {
5425		m["seed"] = value
5426	}
5427}
5428
5429// CudnnRNNBackpropV3Seed2 sets the optional seed2 attribute to value.
5430// If not specified, defaults to 0
5431func CudnnRNNBackpropV3Seed2(value int64) CudnnRNNBackpropV3Attr {
5432	return func(m optionalAttr) {
5433		m["seed2"] = value
5434	}
5435}
5436
5437// CudnnRNNBackpropV3NumProj sets the optional num_proj attribute to value.
5438// If not specified, defaults to 0
5439func CudnnRNNBackpropV3NumProj(value int64) CudnnRNNBackpropV3Attr {
5440	return func(m optionalAttr) {
5441		m["num_proj"] = value
5442	}
5443}
5444
5445// CudnnRNNBackpropV3TimeMajor sets the optional time_major attribute to value.
5446// If not specified, defaults to true
5447func CudnnRNNBackpropV3TimeMajor(value bool) CudnnRNNBackpropV3Attr {
5448	return func(m optionalAttr) {
5449		m["time_major"] = value
5450	}
5451}
5452
5453// Backprop step of CudnnRNNV3.
5454//
5455// Computes the backprop of both data and weights in an RNN. Takes an extra
5456//     "sequence_lengths" input compared to CudnnRNNBackprop.
5457//
5458// rnn_mode: Indicates the type of the RNN model.
5459// input_mode: Indicates whether there is a linear projection between the input and
5460//     the actual computation before the first layer. 'skip_input' is only allowed
5461//     when input_size == num_units; 'auto_select' implies 'skip_input' when
5462//     input_size == num_units; otherwise, it implies 'linear_input'.
5463// direction: Indicates whether a bidirectional model will be used. Should be
5464//   "unidirectional" or "bidirectional".
5465// dropout: Dropout probability. When set to 0., dropout is disabled.
5466// seed: The 1st part of a seed to initialize dropout.
5467// seed2: The 2nd part of a seed to initialize dropout.
5468// input: If time_major is true, this is a 3-D tensor with the shape of
5469//     [seq_length, batch_size, input_size]. If time_major is false, the shape is
5470//     [batch_size, seq_length, input_size].
5471// input_h: If time_major is true, this is a 3-D tensor with the shape of
5472//     [num_layer * dir, batch_size, num_units]. If time_major is false, the shape
5473//     is [batch_size, num_layer * dir, num_units].
5474// input_c: For LSTM, a 3-D tensor with the shape of
5475//     [num_layer * dir, batch, num_units]. For other models, it is ignored.
5476// params: A 1-D tensor that contains the weights and biases in an opaque layout.
5477//     The size must be created through CudnnRNNParamsSize, and initialized
5478//     separately. Note that they might not be compatible across different
5479//     generations. So it is a good idea to save and restore them in the canonical form.
5480// sequence_lengths: a vector of lengths of each input sequence.
5481// output: If time_major is true, this is a 3-D tensor with the shape of
5482//     [seq_length, batch_size, dir * num_units]. If time_major is false, the
5483//     shape is [batch_size, seq_length, dir * num_units].
5484// output_h: Has the same shape as input_h.
5485// output_c: The same shape as input_c for LSTM. An empty tensor for other models.
5486// output_backprop: A 3-D tensor with the same shape as output in the forward pass.
5487// output_h_backprop: A 3-D tensor with the same shape as output_h in the forward
5488//     pass.
5489// output_c_backprop: A 3-D tensor with the same shape as output_c in the forward
5490//     pass.
5491// time_major: Indicates whether the input/output format is time major or batch
5492//     major.
5493// reserve_space: The same reserve_space produced in the forward operation.
5494// input_backprop: The backprop to input in the forward pass. Has the same shape
5495//     as input.
5496// input_h_backprop: The backprop to input_h in the forward pass. Has the same
5497//     shape as input_h.
5498// input_c_backprop: The backprop to input_c in the forward pass. Has the same
5499//     shape as input_c.
5500// params_backprop: The backprop to the params buffer in the forward pass. Has the
5501//     same shape as params.
5502func CudnnRNNBackpropV3(scope *Scope, input tf.Output, input_h tf.Output, input_c tf.Output, params tf.Output, sequence_lengths tf.Output, output tf.Output, output_h tf.Output, output_c tf.Output, output_backprop tf.Output, output_h_backprop tf.Output, output_c_backprop tf.Output, reserve_space tf.Output, host_reserved tf.Output, optional ...CudnnRNNBackpropV3Attr) (input_backprop tf.Output, input_h_backprop tf.Output, input_c_backprop tf.Output, params_backprop tf.Output) {
5503	if scope.Err() != nil {
5504		return
5505	}
5506	attrs := map[string]interface{}{}
5507	for _, a := range optional {
5508		a(attrs)
5509	}
5510	opspec := tf.OpSpec{
5511		Type: "CudnnRNNBackpropV3",
5512		Input: []tf.Input{
5513			input, input_h, input_c, params, sequence_lengths, output, output_h, output_c, output_backprop, output_h_backprop, output_c_backprop, reserve_space, host_reserved,
5514		},
5515		Attrs: attrs,
5516	}
5517	op := scope.AddOperation(opspec)
5518	return op.Output(0), op.Output(1), op.Output(2), op.Output(3)
5519}
5520
5521// CudnnRNNBackpropAttr is an optional argument to CudnnRNNBackprop.
5522type CudnnRNNBackpropAttr func(optionalAttr)
5523
5524// CudnnRNNBackpropRnnMode sets the optional rnn_mode attribute to value.
5525// If not specified, defaults to "lstm"
5526func CudnnRNNBackpropRnnMode(value string) CudnnRNNBackpropAttr {
5527	return func(m optionalAttr) {
5528		m["rnn_mode"] = value
5529	}
5530}
5531
5532// CudnnRNNBackpropInputMode sets the optional input_mode attribute to value.
5533// If not specified, defaults to "linear_input"
5534func CudnnRNNBackpropInputMode(value string) CudnnRNNBackpropAttr {
5535	return func(m optionalAttr) {
5536		m["input_mode"] = value
5537	}
5538}
5539
5540// CudnnRNNBackpropDirection sets the optional direction attribute to value.
5541// If not specified, defaults to "unidirectional"
5542func CudnnRNNBackpropDirection(value string) CudnnRNNBackpropAttr {
5543	return func(m optionalAttr) {
5544		m["direction"] = value
5545	}
5546}
5547
5548// CudnnRNNBackpropDropout sets the optional dropout attribute to value.
5549// If not specified, defaults to 0
5550func CudnnRNNBackpropDropout(value float32) CudnnRNNBackpropAttr {
5551	return func(m optionalAttr) {
5552		m["dropout"] = value
5553	}
5554}
5555
5556// CudnnRNNBackpropSeed sets the optional seed attribute to value.
5557// If not specified, defaults to 0
5558func CudnnRNNBackpropSeed(value int64) CudnnRNNBackpropAttr {
5559	return func(m optionalAttr) {
5560		m["seed"] = value
5561	}
5562}
5563
5564// CudnnRNNBackpropSeed2 sets the optional seed2 attribute to value.
5565// If not specified, defaults to 0
5566func CudnnRNNBackpropSeed2(value int64) CudnnRNNBackpropAttr {
5567	return func(m optionalAttr) {
5568		m["seed2"] = value
5569	}
5570}
5571
5572// Backprop step of CudnnRNN.
5573//
5574// Computes the backprop of both data and weights in an RNN.
5575//
5576// rnn_mode: Indicates the type of the RNN model.
5577// input_mode: Indicates whether there is a linear projection between the input and
5578//     the actual computation before the first layer. 'skip_input' is only allowed
5579//     when input_size == num_units; 'auto_select' implies 'skip_input' when
5580//     input_size == num_units; otherwise, it implies 'linear_input'.
5581// direction: Indicates whether a bidirectional model will be used. Should be
5582//   "unidirectional" or "bidirectional".
5583// dropout: Dropout probability. When set to 0., dropout is disabled.
5584// seed: The 1st part of a seed to initialize dropout.
5585// seed2: The 2nd part of a seed to initialize dropout.
5586// input: A 3-D tensor with the shape of [seq_length, batch_size, input_size].
5587// input_h: A 3-D tensor with the shape of [num_layer * dir, batch_size,
5588//     num_units].
5589// input_c: For LSTM, a 3-D tensor with the shape of
5590//     [num_layer * dir, batch, num_units]. For other models, it is ignored.
5591// params: A 1-D tensor that contains the weights and biases in an opaque layout.
5592//     The size must be created through CudnnRNNParamsSize, and initialized
5593//     separately. Note that they might not be compatible across different
5594//     generations. So it is a good idea to save and restore them in the canonical form.
5595// output: A 3-D tensor with the shape of [seq_length, batch_size,
5596//     dir * num_units].
5597// output_h: Has the same shape as input_h.
5598// output_c: The same shape as input_c for LSTM. An empty tensor for other models.
5599// output_backprop: A 3-D tensor with the same shape as output in the forward pass.
5600// output_h_backprop: A 3-D tensor with the same shape as output_h in the forward
5601//     pass.
5602// output_c_backprop: A 3-D tensor with the same shape as output_c in the forward
5603//     pass.
5604// reserve_space: The same reserve_space produced in the forward operation.
5605// input_backprop: The backprop to input in the forward pass. Has the same shape
5606//     as input.
5607// input_h_backprop: The backprop to input_h in the forward pass. Has the same
5608//     shape as input_h.
5609// input_c_backprop: The backprop to input_c in the forward pass. Has the same
5610//     shape as input_c.
5611// params_backprop: The backprop to the params buffer in the forward pass. Has the
5612//     same shape as params.
5613func CudnnRNNBackprop(scope *Scope, input tf.Output, input_h tf.Output, input_c tf.Output, params tf.Output, output tf.Output, output_h tf.Output, output_c tf.Output, output_backprop tf.Output, output_h_backprop tf.Output, output_c_backprop tf.Output, reserve_space tf.Output, optional ...CudnnRNNBackpropAttr) (input_backprop tf.Output, input_h_backprop tf.Output, input_c_backprop tf.Output, params_backprop tf.Output) {
5614	if scope.Err() != nil {
5615		return
5616	}
5617	attrs := map[string]interface{}{}
5618	for _, a := range optional {
5619		a(attrs)
5620	}
5621	opspec := tf.OpSpec{
5622		Type: "CudnnRNNBackprop",
5623		Input: []tf.Input{
5624			input, input_h, input_c, params, output, output_h, output_c, output_backprop, output_h_backprop, output_c_backprop, reserve_space,
5625		},
5626		Attrs: attrs,
5627	}
5628	op := scope.AddOperation(opspec)
5629	return op.Output(0), op.Output(1), op.Output(2), op.Output(3)
5630}
5631
5632// CudnnRNNV3Attr is an optional argument to CudnnRNNV3.
5633type CudnnRNNV3Attr func(optionalAttr)
5634
5635// CudnnRNNV3RnnMode sets the optional rnn_mode attribute to value.
5636// If not specified, defaults to "lstm"
5637func CudnnRNNV3RnnMode(value string) CudnnRNNV3Attr {
5638	return func(m optionalAttr) {
5639		m["rnn_mode"] = value
5640	}
5641}
5642
5643// CudnnRNNV3InputMode sets the optional input_mode attribute to value.
5644// If not specified, defaults to "linear_input"
5645func CudnnRNNV3InputMode(value string) CudnnRNNV3Attr {
5646	return func(m optionalAttr) {
5647		m["input_mode"] = value
5648	}
5649}
5650
5651// CudnnRNNV3Direction sets the optional direction attribute to value.
5652// If not specified, defaults to "unidirectional"
5653func CudnnRNNV3Direction(value string) CudnnRNNV3Attr {
5654	return func(m optionalAttr) {
5655		m["direction"] = value
5656	}
5657}
5658
5659// CudnnRNNV3Dropout sets the optional dropout attribute to value.
5660// If not specified, defaults to 0
5661func CudnnRNNV3Dropout(value float32) CudnnRNNV3Attr {
5662	return func(m optionalAttr) {
5663		m["dropout"] = value
5664	}
5665}
5666
5667// CudnnRNNV3Seed sets the optional seed attribute to value.
5668// If not specified, defaults to 0
5669func CudnnRNNV3Seed(value int64) CudnnRNNV3Attr {
5670	return func(m optionalAttr) {
5671		m["seed"] = value
5672	}
5673}
5674
5675// CudnnRNNV3Seed2 sets the optional seed2 attribute to value.
5676// If not specified, defaults to 0
5677func CudnnRNNV3Seed2(value int64) CudnnRNNV3Attr {
5678	return func(m optionalAttr) {
5679		m["seed2"] = value
5680	}
5681}
5682
5683// CudnnRNNV3NumProj sets the optional num_proj attribute to value.
5684// If not specified, defaults to 0
5685func CudnnRNNV3NumProj(value int64) CudnnRNNV3Attr {
5686	return func(m optionalAttr) {
5687		m["num_proj"] = value
5688	}
5689}
5690
5691// CudnnRNNV3IsTraining sets the optional is_training attribute to value.
5692// If not specified, defaults to true
5693func CudnnRNNV3IsTraining(value bool) CudnnRNNV3Attr {
5694	return func(m optionalAttr) {
5695		m["is_training"] = value
5696	}
5697}
5698
5699// CudnnRNNV3TimeMajor sets the optional time_major attribute to value.
5700// If not specified, defaults to true
5701func CudnnRNNV3TimeMajor(value bool) CudnnRNNV3Attr {
5702	return func(m optionalAttr) {
5703		m["time_major"] = value
5704	}
5705}
5706
5707// An RNN backed by cuDNN.
5708//
5709// Computes the RNN from the input and initial states, with respect to the params
5710// buffer. Accepts one extra "sequence_lengths" input compared to CudnnRNN.
5711//
5712// rnn_mode: Indicates the type of the RNN model.
5713// input_mode: Indicates whether there is a linear projection between the input and
5714//   the actual computation before the first layer. 'skip_input' is only allowed
5715//   when input_size == num_units; 'auto_select' implies 'skip_input' when
5716//   input_size == num_units; otherwise, it implies 'linear_input'.
5717// direction: Indicates whether a bidirectional model will be used. Should be
5718//   "unidirectional" or "bidirectional".
5719// dropout: Dropout probability. When set to 0., dropout is disabled.
5720// seed: The 1st part of a seed to initialize dropout.
5721// seed2: The 2nd part of a seed to initialize dropout.
5722// input: If time_major is true, this is a 3-D tensor with the shape of
5723//     [seq_length, batch_size, input_size]. If time_major is false, the shape is
5724//     [batch_size, seq_length, input_size].
5725// input_h: If time_major is true, this is a 3-D tensor with the shape of
5726//     [num_layer * dir, batch_size, num_units]. If time_major is false, the shape
5727//     is [batch_size, num_layer * dir, num_units].
5728// input_c: For LSTM, a 3-D tensor with the shape of
5729//     [num_layer * dir, batch, num_units]. For other models, it is ignored.
5730// params: A 1-D tensor that contains the weights and biases in an opaque layout.
5731//     The size must be created through CudnnRNNParamsSize, and initialized
5732//     separately. Note that they might not be compatible across different
5733//     generations, so it is a good idea to save and restore them.
5734// sequence_lengths: a vector of lengths of each input sequence.
5735// output: If time_major is true, this is a 3-D tensor with the shape of
5736//     [seq_length, batch_size, dir * num_units]. If time_major is false, the
5737//     shape is [batch_size, seq_length, dir * num_units].
5738// output_h: The same shape as input_h.
5739// output_c: The same shape as input_c for LSTM. An empty tensor for other models.
5740// is_training: Indicates whether this operation is used for inference or
5741//   training.
5742// time_major: Indicates whether the input/output format is time major or batch
5743//     major.
5744// reserve_space: An opaque tensor that can be used in backprop calculation. It
5745//   is only produced if is_training is true.
5746func CudnnRNNV3(scope *Scope, input tf.Output, input_h tf.Output, input_c tf.Output, params tf.Output, sequence_lengths tf.Output, optional ...CudnnRNNV3Attr) (output tf.Output, output_h tf.Output, output_c tf.Output, reserve_space tf.Output, host_reserved tf.Output) {
5747	if scope.Err() != nil {
5748		return
5749	}
5750	attrs := map[string]interface{}{}
5751	for _, a := range optional {
5752		a(attrs)
5753	}
5754	opspec := tf.OpSpec{
5755		Type: "CudnnRNNV3",
5756		Input: []tf.Input{
5757			input, input_h, input_c, params, sequence_lengths,
5758		},
5759		Attrs: attrs,
5760	}
5761	op := scope.AddOperation(opspec)
5762	return op.Output(0), op.Output(1), op.Output(2), op.Output(3), op.Output(4)
5763}
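// Editorial example (not generated): a minimal sketch of wiring CudnnRNNV3
// into a graph. Placeholder and Const are wrappers defined elsewhere in this
// package; the shapes implied by the placeholders and the attribute values
// below are illustrative assumptions only.
func exampleCudnnRNNV3() {
	s := NewScope()
	input := Placeholder(s.SubScope("input"), tf.Float)    // [seq_length, batch_size, input_size] when time_major
	inputH := Placeholder(s.SubScope("input_h"), tf.Float) // [num_layer * dir, batch_size, num_units]
	inputC := Placeholder(s.SubScope("input_c"), tf.Float) // LSTM cell state; ignored for other rnn_modes
	params := Placeholder(s.SubScope("params"), tf.Float)  // opaque weights, sized via CudnnRNNParamsSize
	seqLens := Placeholder(s.SubScope("seq_lens"), tf.Int32)
	output, outputH, outputC, reserve, _ := CudnnRNNV3(s, input, inputH, inputC, params, seqLens,
		CudnnRNNV3RnnMode("lstm"), CudnnRNNV3IsTraining(true))
	_, _, _, _ = output, outputH, outputC, reserve
}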
5764
5765// Pads a tensor with zeros.
5766//
5767// This operation pads `input` with zeros according to the `paddings` you
5768// specify. `paddings` is an integer tensor with shape `[Dn, 2]`, where n is the
5769// rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates
5770// how many zeros to add before the contents of `input` in that dimension, and
5771// `paddings[D, 1]` indicates how many zeros to add after the contents of `input`
5772// in that dimension.
5773//
5774// The padded size of each dimension D of the output is:
5775//
5776// `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)`
5777//
5778// For example:
5779//
5780// ```
5781// # 't' is [[1, 1], [2, 2]]
5782// # 'paddings' is [[1, 1], [2, 2]]
5783// # rank of 't' is 2
5784// pad(t, paddings) ==> [[0, 0, 0, 0, 0, 0]
5785//                       [0, 0, 1, 1, 0, 0]
5786//                       [0, 0, 2, 2, 0, 0]
5787//                       [0, 0, 0, 0, 0, 0]]
5788// ```
5789//
5790func Pad(scope *Scope, input tf.Output, paddings tf.Output) (output tf.Output) {
5791	if scope.Err() != nil {
5792		return
5793	}
5794	opspec := tf.OpSpec{
5795		Type: "Pad",
5796		Input: []tf.Input{
5797			input, paddings,
5798		},
5799	}
5800	op := scope.AddOperation(opspec)
5801	return op.Output(0)
5802}
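// Editorial example (not generated): the Pad example from the doc comment
// above expressed as a graph sketch, using this package's Const wrapper
// (defined elsewhere in this file); values mirror the example.
func examplePad() {
	s := NewScope()
	t := Const(s.SubScope("t"), [][]int32{{1, 1}, {2, 2}})
	paddings := Const(s.SubScope("paddings"), [][]int32{{1, 1}, {2, 2}})
	padded := Pad(s, t, paddings) // result shape: [1+2+1, 2+2+2] = [4, 6]
	_ = padded
}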
5803
5804// CudnnRNNV2Attr is an optional argument to CudnnRNNV2.
5805type CudnnRNNV2Attr func(optionalAttr)
5806
5807// CudnnRNNV2RnnMode sets the optional rnn_mode attribute to value.
5808// If not specified, defaults to "lstm"
5809func CudnnRNNV2RnnMode(value string) CudnnRNNV2Attr {
5810	return func(m optionalAttr) {
5811		m["rnn_mode"] = value
5812	}
5813}
5814
5815// CudnnRNNV2InputMode sets the optional input_mode attribute to value.
5816// If not specified, defaults to "linear_input"
5817func CudnnRNNV2InputMode(value string) CudnnRNNV2Attr {
5818	return func(m optionalAttr) {
5819		m["input_mode"] = value
5820	}
5821}
5822
5823// CudnnRNNV2Direction sets the optional direction attribute to value.
5824// If not specified, defaults to "unidirectional"
5825func CudnnRNNV2Direction(value string) CudnnRNNV2Attr {
5826	return func(m optionalAttr) {
5827		m["direction"] = value
5828	}
5829}
5830
5831// CudnnRNNV2Dropout sets the optional dropout attribute to value.
5832// If not specified, defaults to 0
5833func CudnnRNNV2Dropout(value float32) CudnnRNNV2Attr {
5834	return func(m optionalAttr) {
5835		m["dropout"] = value
5836	}
5837}
5838
5839// CudnnRNNV2Seed sets the optional seed attribute to value.
5840// If not specified, defaults to 0
5841func CudnnRNNV2Seed(value int64) CudnnRNNV2Attr {
5842	return func(m optionalAttr) {
5843		m["seed"] = value
5844	}
5845}
5846
5847// CudnnRNNV2Seed2 sets the optional seed2 attribute to value.
5848// If not specified, defaults to 0
5849func CudnnRNNV2Seed2(value int64) CudnnRNNV2Attr {
5850	return func(m optionalAttr) {
5851		m["seed2"] = value
5852	}
5853}
5854
5855// CudnnRNNV2IsTraining sets the optional is_training attribute to value.
5856// If not specified, defaults to true
5857func CudnnRNNV2IsTraining(value bool) CudnnRNNV2Attr {
5858	return func(m optionalAttr) {
5859		m["is_training"] = value
5860	}
5861}
5862
5863// An RNN backed by cuDNN.
5864//
5865// Computes the RNN from the input and initial states, with respect to the params
5866// buffer. Produces one extra output, "host_reserved", compared to CudnnRNN.
5867//
5868// rnn_mode: Indicates the type of the RNN model.
5869// input_mode: Indicates whether there is a linear projection between the input and
5870//   the actual computation before the first layer. 'skip_input' is only allowed
5871//   when input_size == num_units; 'auto_select' implies 'skip_input' when
5872//   input_size == num_units; otherwise, it implies 'linear_input'.
5873// direction: Indicates whether a bidirectional model will be used. Should be
5874//   "unidirectional" or "bidirectional".
5875// dropout: Dropout probability. When set to 0., dropout is disabled.
5876// seed: The 1st part of a seed to initialize dropout.
5877// seed2: The 2nd part of a seed to initialize dropout.
5878// input: A 3-D tensor with the shape of [seq_length, batch_size, input_size].
5879// input_h: A 3-D tensor with the shape of [num_layer * dir, batch_size,
5880//     num_units].
5881// input_c: For LSTM, a 3-D tensor with the shape of
5882//     [num_layer * dir, batch, num_units]. For other models, it is ignored.
5883// params: A 1-D tensor that contains the weights and biases in an opaque layout.
5884//     The size must be created through CudnnRNNParamsSize, and initialized
5885//     separately. Note that they might not be compatible across different
5886//     generations, so it is a good idea to save and restore them.
5887// output: A 3-D tensor with the shape of [seq_length, batch_size,
5888//     dir * num_units].
5889// output_h: The same shape as input_h.
5890// output_c: The same shape as input_c for LSTM. An empty tensor for other models.
5891// is_training: Indicates whether this operation is used for inference or
5892//   training.
5893// reserve_space: An opaque tensor that can be used in backprop calculation. It
5894//   is only produced if is_training is true.
5895// host_reserved: An opaque tensor that can be used in backprop calculation. It is
5896//   only produced if is_training is true. It is output on host memory rather than
5897//   device memory.
5898func CudnnRNNV2(scope *Scope, input tf.Output, input_h tf.Output, input_c tf.Output, params tf.Output, optional ...CudnnRNNV2Attr) (output tf.Output, output_h tf.Output, output_c tf.Output, reserve_space tf.Output, host_reserved tf.Output) {
5899	if scope.Err() != nil {
5900		return
5901	}
5902	attrs := map[string]interface{}{}
5903	for _, a := range optional {
5904		a(attrs)
5905	}
5906	opspec := tf.OpSpec{
5907		Type: "CudnnRNNV2",
5908		Input: []tf.Input{
5909			input, input_h, input_c, params,
5910		},
5911		Attrs: attrs,
5912	}
5913	op := scope.AddOperation(opspec)
5914	return op.Output(0), op.Output(1), op.Output(2), op.Output(3), op.Output(4)
5915}
5916
5917// CudnnRNNParamsSizeAttr is an optional argument to CudnnRNNParamsSize.
5918type CudnnRNNParamsSizeAttr func(optionalAttr)
5919
5920// CudnnRNNParamsSizeRnnMode sets the optional rnn_mode attribute to value.
5921// If not specified, defaults to "lstm"
5922func CudnnRNNParamsSizeRnnMode(value string) CudnnRNNParamsSizeAttr {
5923	return func(m optionalAttr) {
5924		m["rnn_mode"] = value
5925	}
5926}
5927
5928// CudnnRNNParamsSizeInputMode sets the optional input_mode attribute to value.
5929// If not specified, defaults to "linear_input"
5930func CudnnRNNParamsSizeInputMode(value string) CudnnRNNParamsSizeAttr {
5931	return func(m optionalAttr) {
5932		m["input_mode"] = value
5933	}
5934}
5935
5936// CudnnRNNParamsSizeDirection sets the optional direction attribute to value.
5937// If not specified, defaults to "unidirectional"
5938func CudnnRNNParamsSizeDirection(value string) CudnnRNNParamsSizeAttr {
5939	return func(m optionalAttr) {
5940		m["direction"] = value
5941	}
5942}
5943
5944// CudnnRNNParamsSizeDropout sets the optional dropout attribute to value.
5945// If not specified, defaults to 0
5946func CudnnRNNParamsSizeDropout(value float32) CudnnRNNParamsSizeAttr {
5947	return func(m optionalAttr) {
5948		m["dropout"] = value
5949	}
5950}
5951
5952// CudnnRNNParamsSizeSeed sets the optional seed attribute to value.
5953// If not specified, defaults to 0
5954func CudnnRNNParamsSizeSeed(value int64) CudnnRNNParamsSizeAttr {
5955	return func(m optionalAttr) {
5956		m["seed"] = value
5957	}
5958}
5959
5960// CudnnRNNParamsSizeSeed2 sets the optional seed2 attribute to value.
5961// If not specified, defaults to 0
5962func CudnnRNNParamsSizeSeed2(value int64) CudnnRNNParamsSizeAttr {
5963	return func(m optionalAttr) {
5964		m["seed2"] = value
5965	}
5966}
5967
5968// CudnnRNNParamsSizeNumProj sets the optional num_proj attribute to value.
5969// If not specified, defaults to 0
5970func CudnnRNNParamsSizeNumProj(value int64) CudnnRNNParamsSizeAttr {
5971	return func(m optionalAttr) {
5972		m["num_proj"] = value
5973	}
5974}
5975
5976// Computes the size of the weights that can be used by a Cudnn RNN model.
5977//
5978// Returns the params size that can be used by the Cudnn RNN model. Subsequent
5979// weight allocation and initialization should use this size.
5980//
5981// num_layers: Specifies the number of layers in the RNN model.
5982// num_units: Specifies the size of the hidden state.
5983// input_size: Specifies the size of the input state.
5984// rnn_mode: Indicates the type of the RNN model.
5985// input_mode: Indicates whether there is a linear projection between the input and
5986//   the actual computation before the first layer. 'skip_input' is only allowed
5987//   when input_size == num_units; 'auto_select' implies 'skip_input' when
5988//   input_size == num_units; otherwise, it implies 'linear_input'.
5989// direction: Indicates whether a bidirectional model will be used.
5990//   dir = (direction == bidirectional) ? 2 : 1
5991// dropout: Dropout probability. When set to 0., dropout is disabled.
5992// seed: The 1st part of a seed to initialize dropout.
5993// seed2: The 2nd part of a seed to initialize dropout.
5994// params_size: The size of the params buffer that should be allocated and
5995//   initialized for this RNN model. Note that this params buffer may not be
5996//   compatible across GPUs. Please use CudnnRNNParamsWeights and
5997//   CudnnRNNParamsBiases to save and restore them in a way that is compatible
5998//   across different runs.
5999func CudnnRNNParamsSize(scope *Scope, num_layers tf.Output, num_units tf.Output, input_size tf.Output, T tf.DataType, S tf.DataType, optional ...CudnnRNNParamsSizeAttr) (params_size tf.Output) {
6000	if scope.Err() != nil {
6001		return
6002	}
6003	attrs := map[string]interface{}{"T": T, "S": S}
6004	for _, a := range optional {
6005		a(attrs)
6006	}
6007	opspec := tf.OpSpec{
6008		Type: "CudnnRNNParamsSize",
6009		Input: []tf.Input{
6010			num_layers, num_units, input_size,
6011		},
6012		Attrs: attrs,
6013	}
6014	op := scope.AddOperation(opspec)
6015	return op.Output(0)
6016}
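// Editorial example (not generated): a sketch of computing the params buffer
// size for a single-layer LSTM. The layer/unit/input sizes are illustrative
// assumptions; T is the weight dtype and S the integer dtype used for sizes.
func exampleCudnnRNNParamsSize() {
	s := NewScope()
	numLayers := Const(s.SubScope("num_layers"), int32(1))
	numUnits := Const(s.SubScope("num_units"), int32(128))
	inputSize := Const(s.SubScope("input_size"), int32(64))
	paramsSize := CudnnRNNParamsSize(s, numLayers, numUnits, inputSize, tf.Float, tf.Int32,
		CudnnRNNParamsSizeRnnMode("lstm"))
	_ = paramsSize
}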
6017
6018// RecordInputAttr is an optional argument to RecordInput.
6019type RecordInputAttr func(optionalAttr)
6020
6021// RecordInputFileRandomSeed sets the optional file_random_seed attribute to value.
6022//
6023// value: Random seeds used to produce randomized records.
6024// If not specified, defaults to 301
6025func RecordInputFileRandomSeed(value int64) RecordInputAttr {
6026	return func(m optionalAttr) {
6027		m["file_random_seed"] = value
6028	}
6029}
6030
6031// RecordInputFileShuffleShiftRatio sets the optional file_shuffle_shift_ratio attribute to value.
6032//
6033// value: Shifts the list of files after the list is randomly
6034// shuffled.
6035// If not specified, defaults to 0
6036func RecordInputFileShuffleShiftRatio(value float32) RecordInputAttr {
6037	return func(m optionalAttr) {
6038		m["file_shuffle_shift_ratio"] = value
6039	}
6040}
6041
6042// RecordInputFileBufferSize sets the optional file_buffer_size attribute to value.
6043//
6044// value: The size of the randomization shuffling buffer.
6045// If not specified, defaults to 10000
6046func RecordInputFileBufferSize(value int64) RecordInputAttr {
6047	return func(m optionalAttr) {
6048		m["file_buffer_size"] = value
6049	}
6050}
6051
6052// RecordInputFileParallelism sets the optional file_parallelism attribute to value.
6053//
6054// value: How many sstables are opened and concurrently iterated over.
6055// If not specified, defaults to 16
6056func RecordInputFileParallelism(value int64) RecordInputAttr {
6057	return func(m optionalAttr) {
6058		m["file_parallelism"] = value
6059	}
6060}
6061
6062// RecordInputBatchSize sets the optional batch_size attribute to value.
6063//
6064// value: The batch size.
6065// If not specified, defaults to 32
6066func RecordInputBatchSize(value int64) RecordInputAttr {
6067	return func(m optionalAttr) {
6068		m["batch_size"] = value
6069	}
6070}
6071
6072// RecordInputCompressionType sets the optional compression_type attribute to value.
6073//
6074// value: The type of compression for the file. Currently ZLIB and
6075// GZIP are supported. Defaults to none.
6076// If not specified, defaults to ""
6077func RecordInputCompressionType(value string) RecordInputAttr {
6078	return func(m optionalAttr) {
6079		m["compression_type"] = value
6080	}
6081}
6082
6083// Emits randomized records.
6084//
6085// Arguments:
6086//	file_pattern: Glob pattern for the data files.
6087//
6088// Returns A tensor of shape [batch_size].
6089func RecordInput(scope *Scope, file_pattern string, optional ...RecordInputAttr) (records tf.Output) {
6090	if scope.Err() != nil {
6091		return
6092	}
6093	attrs := map[string]interface{}{"file_pattern": file_pattern}
6094	for _, a := range optional {
6095		a(attrs)
6096	}
6097	opspec := tf.OpSpec{
6098		Type: "RecordInput",
6099
6100		Attrs: attrs,
6101	}
6102	op := scope.AddOperation(opspec)
6103	return op.Output(0)
6104}
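// Editorial example (not generated): a sketch of reading randomized record
// batches. The glob pattern is a hypothetical path and the attribute values
// are illustrative.
func exampleRecordInput() {
	s := NewScope()
	records := RecordInput(s, "/tmp/training-data-*",
		RecordInputBatchSize(64), RecordInputFileParallelism(4))
	_ = records // a string tensor of shape [batch_size]
}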
6105
6106// OrderedMapIncompleteSizeAttr is an optional argument to OrderedMapIncompleteSize.
6107type OrderedMapIncompleteSizeAttr func(optionalAttr)
6108
6109// OrderedMapIncompleteSizeCapacity sets the optional capacity attribute to value.
6110// If not specified, defaults to 0
6111//
6112// REQUIRES: value >= 0
6113func OrderedMapIncompleteSizeCapacity(value int64) OrderedMapIncompleteSizeAttr {
6114	return func(m optionalAttr) {
6115		m["capacity"] = value
6116	}
6117}
6118
6119// OrderedMapIncompleteSizeMemoryLimit sets the optional memory_limit attribute to value.
6120// If not specified, defaults to 0
6121//
6122// REQUIRES: value >= 0
6123func OrderedMapIncompleteSizeMemoryLimit(value int64) OrderedMapIncompleteSizeAttr {
6124	return func(m optionalAttr) {
6125		m["memory_limit"] = value
6126	}
6127}
6128
6129// OrderedMapIncompleteSizeContainer sets the optional container attribute to value.
6130// If not specified, defaults to ""
6131func OrderedMapIncompleteSizeContainer(value string) OrderedMapIncompleteSizeAttr {
6132	return func(m optionalAttr) {
6133		m["container"] = value
6134	}
6135}
6136
6137// OrderedMapIncompleteSizeSharedName sets the optional shared_name attribute to value.
6138// If not specified, defaults to ""
6139func OrderedMapIncompleteSizeSharedName(value string) OrderedMapIncompleteSizeAttr {
6140	return func(m optionalAttr) {
6141		m["shared_name"] = value
6142	}
6143}
6144
6145// Op returns the number of incomplete elements in the underlying container.
6146func OrderedMapIncompleteSize(scope *Scope, dtypes []tf.DataType, optional ...OrderedMapIncompleteSizeAttr) (size tf.Output) {
6147	if scope.Err() != nil {
6148		return
6149	}
6150	attrs := map[string]interface{}{"dtypes": dtypes}
6151	for _, a := range optional {
6152		a(attrs)
6153	}
6154	opspec := tf.OpSpec{
6155		Type: "OrderedMapIncompleteSize",
6156
6157		Attrs: attrs,
6158	}
6159	op := scope.AddOperation(opspec)
6160	return op.Output(0)
6161}
6162
6163// OrderedMapSizeAttr is an optional argument to OrderedMapSize.
6164type OrderedMapSizeAttr func(optionalAttr)
6165
6166// OrderedMapSizeCapacity sets the optional capacity attribute to value.
6167// If not specified, defaults to 0
6168//
6169// REQUIRES: value >= 0
6170func OrderedMapSizeCapacity(value int64) OrderedMapSizeAttr {
6171	return func(m optionalAttr) {
6172		m["capacity"] = value
6173	}
6174}
6175
6176// OrderedMapSizeMemoryLimit sets the optional memory_limit attribute to value.
6177// If not specified, defaults to 0
6178//
6179// REQUIRES: value >= 0
6180func OrderedMapSizeMemoryLimit(value int64) OrderedMapSizeAttr {
6181	return func(m optionalAttr) {
6182		m["memory_limit"] = value
6183	}
6184}
6185
6186// OrderedMapSizeContainer sets the optional container attribute to value.
6187// If not specified, defaults to ""
6188func OrderedMapSizeContainer(value string) OrderedMapSizeAttr {
6189	return func(m optionalAttr) {
6190		m["container"] = value
6191	}
6192}
6193
6194// OrderedMapSizeSharedName sets the optional shared_name attribute to value.
6195// If not specified, defaults to ""
6196func OrderedMapSizeSharedName(value string) OrderedMapSizeAttr {
6197	return func(m optionalAttr) {
6198		m["shared_name"] = value
6199	}
6200}
6201
6202// Op returns the number of elements in the underlying container.
6203func OrderedMapSize(scope *Scope, dtypes []tf.DataType, optional ...OrderedMapSizeAttr) (size tf.Output) {
6204	if scope.Err() != nil {
6205		return
6206	}
6207	attrs := map[string]interface{}{"dtypes": dtypes}
6208	for _, a := range optional {
6209		a(attrs)
6210	}
6211	opspec := tf.OpSpec{
6212		Type: "OrderedMapSize",
6213
6214		Attrs: attrs,
6215	}
6216	op := scope.AddOperation(opspec)
6217	return op.Output(0)
6218}
6219
6220// OrderedMapUnstageNoKeyAttr is an optional argument to OrderedMapUnstageNoKey.
6221type OrderedMapUnstageNoKeyAttr func(optionalAttr)
6222
6223// OrderedMapUnstageNoKeyCapacity sets the optional capacity attribute to value.
6224// If not specified, defaults to 0
6225//
6226// REQUIRES: value >= 0
6227func OrderedMapUnstageNoKeyCapacity(value int64) OrderedMapUnstageNoKeyAttr {
6228	return func(m optionalAttr) {
6229		m["capacity"] = value
6230	}
6231}
6232
6233// OrderedMapUnstageNoKeyMemoryLimit sets the optional memory_limit attribute to value.
6234// If not specified, defaults to 0
6235//
6236// REQUIRES: value >= 0
6237func OrderedMapUnstageNoKeyMemoryLimit(value int64) OrderedMapUnstageNoKeyAttr {
6238	return func(m optionalAttr) {
6239		m["memory_limit"] = value
6240	}
6241}
6242
6243// OrderedMapUnstageNoKeyContainer sets the optional container attribute to value.
6244// If not specified, defaults to ""
6245func OrderedMapUnstageNoKeyContainer(value string) OrderedMapUnstageNoKeyAttr {
6246	return func(m optionalAttr) {
6247		m["container"] = value
6248	}
6249}
6250
6251// OrderedMapUnstageNoKeySharedName sets the optional shared_name attribute to value.
6252// If not specified, defaults to ""
6253func OrderedMapUnstageNoKeySharedName(value string) OrderedMapUnstageNoKeyAttr {
6254	return func(m optionalAttr) {
6255		m["shared_name"] = value
6256	}
6257}
6258
6259// Op removes and returns the (key, value) element with the smallest
6260//
6261// key from the underlying container.   If the underlying container
6262// does not contain elements, the op will block until it does.
6263func OrderedMapUnstageNoKey(scope *Scope, indices tf.Output, dtypes []tf.DataType, optional ...OrderedMapUnstageNoKeyAttr) (key tf.Output, values []tf.Output) {
6264	if scope.Err() != nil {
6265		return
6266	}
6267	attrs := map[string]interface{}{"dtypes": dtypes}
6268	for _, a := range optional {
6269		a(attrs)
6270	}
6271	opspec := tf.OpSpec{
6272		Type: "OrderedMapUnstageNoKey",
6273		Input: []tf.Input{
6274			indices,
6275		},
6276		Attrs: attrs,
6277	}
6278	op := scope.AddOperation(opspec)
6279	if scope.Err() != nil {
6280		return
6281	}
6282	var idx int
6283	var err error
6284	key = op.Output(idx)
6285	if values, idx, err = makeOutputList(op, idx, "values"); err != nil {
6286		scope.UpdateErr("OrderedMapUnstageNoKey", err)
6287		return
6288	}
6289	return key, values
6290}
6291
6292// OrderedMapPeekAttr is an optional argument to OrderedMapPeek.
6293type OrderedMapPeekAttr func(optionalAttr)
6294
6295// OrderedMapPeekCapacity sets the optional capacity attribute to value.
6296// If not specified, defaults to 0
6297//
6298// REQUIRES: value >= 0
6299func OrderedMapPeekCapacity(value int64) OrderedMapPeekAttr {
6300	return func(m optionalAttr) {
6301		m["capacity"] = value
6302	}
6303}
6304
6305// OrderedMapPeekMemoryLimit sets the optional memory_limit attribute to value.
6306// If not specified, defaults to 0
6307//
6308// REQUIRES: value >= 0
6309func OrderedMapPeekMemoryLimit(value int64) OrderedMapPeekAttr {
6310	return func(m optionalAttr) {
6311		m["memory_limit"] = value
6312	}
6313}
6314
6315// OrderedMapPeekContainer sets the optional container attribute to value.
6316// If not specified, defaults to ""
6317func OrderedMapPeekContainer(value string) OrderedMapPeekAttr {
6318	return func(m optionalAttr) {
6319		m["container"] = value
6320	}
6321}
6322
6323// OrderedMapPeekSharedName sets the optional shared_name attribute to value.
6324// If not specified, defaults to ""
6325func OrderedMapPeekSharedName(value string) OrderedMapPeekAttr {
6326	return func(m optionalAttr) {
6327		m["shared_name"] = value
6328	}
6329}
6330
6331// Op peeks at the values at the specified key.  If the
6332//
6333// underlying container does not contain this key,
6334// this op will block until it does.   This Op is optimized for
6335// performance.
6336func OrderedMapPeek(scope *Scope, key tf.Output, indices tf.Output, dtypes []tf.DataType, optional ...OrderedMapPeekAttr) (values []tf.Output) {
6337	if scope.Err() != nil {
6338		return
6339	}
6340	attrs := map[string]interface{}{"dtypes": dtypes}
6341	for _, a := range optional {
6342		a(attrs)
6343	}
6344	opspec := tf.OpSpec{
6345		Type: "OrderedMapPeek",
6346		Input: []tf.Input{
6347			key, indices,
6348		},
6349		Attrs: attrs,
6350	}
6351	op := scope.AddOperation(opspec)
6352	if scope.Err() != nil {
6353		return
6354	}
6355	var idx int
6356	var err error
6357	if values, idx, err = makeOutputList(op, idx, "values"); err != nil {
6358		scope.UpdateErr("OrderedMapPeek", err)
6359		return
6360	}
6361	return values
6362}
6363
6364// MapIncompleteSizeAttr is an optional argument to MapIncompleteSize.
6365type MapIncompleteSizeAttr func(optionalAttr)
6366
6367// MapIncompleteSizeCapacity sets the optional capacity attribute to value.
6368// If not specified, defaults to 0
6369//
6370// REQUIRES: value >= 0
6371func MapIncompleteSizeCapacity(value int64) MapIncompleteSizeAttr {
6372	return func(m optionalAttr) {
6373		m["capacity"] = value
6374	}
6375}
6376
6377// MapIncompleteSizeMemoryLimit sets the optional memory_limit attribute to value.
6378// If not specified, defaults to 0
6379//
6380// REQUIRES: value >= 0
6381func MapIncompleteSizeMemoryLimit(value int64) MapIncompleteSizeAttr {
6382	return func(m optionalAttr) {
6383		m["memory_limit"] = value
6384	}
6385}
6386
6387// MapIncompleteSizeContainer sets the optional container attribute to value.
6388// If not specified, defaults to ""
6389func MapIncompleteSizeContainer(value string) MapIncompleteSizeAttr {
6390	return func(m optionalAttr) {
6391		m["container"] = value
6392	}
6393}
6394
6395// MapIncompleteSizeSharedName sets the optional shared_name attribute to value.
6396// If not specified, defaults to ""
6397func MapIncompleteSizeSharedName(value string) MapIncompleteSizeAttr {
6398	return func(m optionalAttr) {
6399		m["shared_name"] = value
6400	}
6401}
6402
6403// Op returns the number of incomplete elements in the underlying container.
6404func MapIncompleteSize(scope *Scope, dtypes []tf.DataType, optional ...MapIncompleteSizeAttr) (size tf.Output) {
6405	if scope.Err() != nil {
6406		return
6407	}
6408	attrs := map[string]interface{}{"dtypes": dtypes}
6409	for _, a := range optional {
6410		a(attrs)
6411	}
6412	opspec := tf.OpSpec{
6413		Type: "MapIncompleteSize",
6414
6415		Attrs: attrs,
6416	}
6417	op := scope.AddOperation(opspec)
6418	return op.Output(0)
6419}
6420
6421// MapSizeAttr is an optional argument to MapSize.
6422type MapSizeAttr func(optionalAttr)
6423
6424// MapSizeCapacity sets the optional capacity attribute to value.
6425// If not specified, defaults to 0
6426//
6427// REQUIRES: value >= 0
6428func MapSizeCapacity(value int64) MapSizeAttr {
6429	return func(m optionalAttr) {
6430		m["capacity"] = value
6431	}
6432}
6433
6434// MapSizeMemoryLimit sets the optional memory_limit attribute to value.
6435// If not specified, defaults to 0
6436//
6437// REQUIRES: value >= 0
6438func MapSizeMemoryLimit(value int64) MapSizeAttr {
6439	return func(m optionalAttr) {
6440		m["memory_limit"] = value
6441	}
6442}
6443
6444// MapSizeContainer sets the optional container attribute to value.
6445// If not specified, defaults to ""
6446func MapSizeContainer(value string) MapSizeAttr {
6447	return func(m optionalAttr) {
6448		m["container"] = value
6449	}
6450}
6451
6452// MapSizeSharedName sets the optional shared_name attribute to value.
6453// If not specified, defaults to ""
6454func MapSizeSharedName(value string) MapSizeAttr {
6455	return func(m optionalAttr) {
6456		m["shared_name"] = value
6457	}
6458}
6459
6460// Op returns the number of elements in the underlying container.
6461func MapSize(scope *Scope, dtypes []tf.DataType, optional ...MapSizeAttr) (size tf.Output) {
6462	if scope.Err() != nil {
6463		return
6464	}
6465	attrs := map[string]interface{}{"dtypes": dtypes}
6466	for _, a := range optional {
6467		a(attrs)
6468	}
6469	opspec := tf.OpSpec{
6470		Type: "MapSize",
6471
6472		Attrs: attrs,
6473	}
6474	op := scope.AddOperation(opspec)
6475	return op.Output(0)
6476}
6477
6478// MapUnstageNoKeyAttr is an optional argument to MapUnstageNoKey.
6479type MapUnstageNoKeyAttr func(optionalAttr)
6480
6481// MapUnstageNoKeyCapacity sets the optional capacity attribute to value.
6482// If not specified, defaults to 0
6483//
6484// REQUIRES: value >= 0
6485func MapUnstageNoKeyCapacity(value int64) MapUnstageNoKeyAttr {
6486	return func(m optionalAttr) {
6487		m["capacity"] = value
6488	}
6489}
6490
6491// MapUnstageNoKeyMemoryLimit sets the optional memory_limit attribute to value.
6492// If not specified, defaults to 0
6493//
6494// REQUIRES: value >= 0
6495func MapUnstageNoKeyMemoryLimit(value int64) MapUnstageNoKeyAttr {
6496	return func(m optionalAttr) {
6497		m["memory_limit"] = value
6498	}
6499}
6500
6501// MapUnstageNoKeyContainer sets the optional container attribute to value.
6502// If not specified, defaults to ""
6503func MapUnstageNoKeyContainer(value string) MapUnstageNoKeyAttr {
6504	return func(m optionalAttr) {
6505		m["container"] = value
6506	}
6507}
6508
6509// MapUnstageNoKeySharedName sets the optional shared_name attribute to value.
6510// If not specified, defaults to ""
6511func MapUnstageNoKeySharedName(value string) MapUnstageNoKeyAttr {
6512	return func(m optionalAttr) {
6513		m["shared_name"] = value
6514	}
6515}
6516
6517// Op removes and returns a random (key, value)
6518//
6519// from the underlying container.   If the underlying container
6520// does not contain elements, the op will block until it does.
6521func MapUnstageNoKey(scope *Scope, indices tf.Output, dtypes []tf.DataType, optional ...MapUnstageNoKeyAttr) (key tf.Output, values []tf.Output) {
6522	if scope.Err() != nil {
6523		return
6524	}
6525	attrs := map[string]interface{}{"dtypes": dtypes}
6526	for _, a := range optional {
6527		a(attrs)
6528	}
6529	opspec := tf.OpSpec{
6530		Type: "MapUnstageNoKey",
6531		Input: []tf.Input{
6532			indices,
6533		},
6534		Attrs: attrs,
6535	}
6536	op := scope.AddOperation(opspec)
6537	if scope.Err() != nil {
6538		return
6539	}
6540	var idx int
6541	var err error
6542	key = op.Output(idx)
6543	if values, idx, err = makeOutputList(op, idx, "values"); err != nil {
6544		scope.UpdateErr("MapUnstageNoKey", err)
6545		return
6546	}
6547	return key, values
6548}
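// Editorial example (not generated): a sketch of draining a staged map. It
// assumes values were inserted elsewhere under the same shared_name (e.g. by
// a matching MapStage op); indices select which components to return.
func exampleMapUnstageNoKey() {
	s := NewScope()
	indices := Const(s.SubScope("indices"), []int32{0})
	key, values := MapUnstageNoKey(s, indices, []tf.DataType{tf.Float},
		MapUnstageNoKeySharedName("shared_map"))
	_, _ = key, values
}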
6549
6550// UnbatchAttr is an optional argument to Unbatch.
6551type UnbatchAttr func(optionalAttr)
6552
6553// UnbatchContainer sets the optional container attribute to value.
6554// If not specified, defaults to ""
6555func UnbatchContainer(value string) UnbatchAttr {
6556	return func(m optionalAttr) {
6557		m["container"] = value
6558	}
6559}
6560
6561// UnbatchSharedName sets the optional shared_name attribute to value.
6562// If not specified, defaults to ""
6563func UnbatchSharedName(value string) UnbatchAttr {
6564	return func(m optionalAttr) {
6565		m["shared_name"] = value
6566	}
6567}
6568
6569// Reverses the operation of Batch for a single output Tensor.
6570//
6571// An instance of Unbatch either receives an empty batched_tensor, in which case it
6572// asynchronously waits until the values become available from a concurrently
6573// running instance of Unbatch with the same container and shared_name, or receives
6574// a non-empty batched_tensor, in which case it finalizes all other concurrently
6575// running instances and outputs its own element from the batch.
6576//
6577// batched_tensor: The possibly transformed output of Batch. The size of the first
6578//  dimension should remain unchanged by the transformations for the operation to
6579//  work.
6580// batch_index: The matching batch_index obtained from Batch.
6581// id: The id scalar emitted by Batch.
6582// unbatched_tensor: The Tensor corresponding to this execution.
6583// timeout_micros: Maximum amount of time (in microseconds) to wait to receive the
6584//  batched input tensor associated with a given invocation of the op.
6585// container: Container to control resource sharing.
6586// shared_name: Instances of Unbatch with the same container and shared_name are
6587//  assumed to possibly belong to the same batch. If left empty, the op name will
6588//  be used as the shared name.
6589func Unbatch(scope *Scope, batched_tensor tf.Output, batch_index tf.Output, id tf.Output, timeout_micros int64, optional ...UnbatchAttr) (unbatched_tensor tf.Output) {
6590	if scope.Err() != nil {
6591		return
6592	}
6593	attrs := map[string]interface{}{"timeout_micros": timeout_micros}
6594	for _, a := range optional {
6595		a(attrs)
6596	}
6597	opspec := tf.OpSpec{
6598		Type: "Unbatch",
6599		Input: []tf.Input{
6600			batched_tensor, batch_index, id,
6601		},
6602		Attrs: attrs,
6603	}
6604	op := scope.AddOperation(opspec)
6605	return op.Output(0)
6606}
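// Editorial example (not generated): a sketch showing how Unbatch is chained
// after a Batch op. The inputs are assumed to come from that op, and the
// 5-second timeout (expressed in microseconds) is an illustrative choice.
func exampleUnbatch(s *Scope, batchedTensor, batchIndex, id tf.Output) tf.Output {
	return Unbatch(s, batchedTensor, batchIndex, id, 5000000,
		UnbatchSharedName("shared_batch"))
}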
6607
6608// MapUnstageAttr is an optional argument to MapUnstage.
6609type MapUnstageAttr func(optionalAttr)
6610
6611// MapUnstageCapacity sets the optional capacity attribute to value.
6612// If not specified, defaults to 0
6613//
6614// REQUIRES: value >= 0
6615func MapUnstageCapacity(value int64) MapUnstageAttr {
6616	return func(m optionalAttr) {
6617		m["capacity"] = value
6618	}
6619}
6620
6621// MapUnstageMemoryLimit sets the optional memory_limit attribute to value.
6622// If not specified, defaults to 0
6623//
6624// REQUIRES: value >= 0
6625func MapUnstageMemoryLimit(value int64) MapUnstageAttr {
6626	return func(m optionalAttr) {
6627		m["memory_limit"] = value
6628	}
6629}
6630
6631// MapUnstageContainer sets the optional container attribute to value.
6632// If not specified, defaults to ""
6633func MapUnstageContainer(value string) MapUnstageAttr {
6634	return func(m optionalAttr) {
6635		m["container"] = value
6636	}
6637}
6638
6639// MapUnstageSharedName sets the optional shared_name attribute to value.
6640// If not specified, defaults to ""
6641func MapUnstageSharedName(value string) MapUnstageAttr {
6642	return func(m optionalAttr) {
6643		m["shared_name"] = value
6644	}
6645}
6646
6647// Op removes and returns the values associated with the key
6648//
6649// from the underlying container.   If the underlying container
6650// does not contain this key, the op will block until it does.
6651func MapUnstage(scope *Scope, key tf.Output, indices tf.Output, dtypes []tf.DataType, optional ...MapUnstageAttr) (values []tf.Output) {
6652	if scope.Err() != nil {
6653		return
6654	}
6655	attrs := map[string]interface{}{"dtypes": dtypes}
6656	for _, a := range optional {
6657		a(attrs)
6658	}
6659	opspec := tf.OpSpec{
6660		Type: "MapUnstage",
6661		Input: []tf.Input{
6662			key, indices,
6663		},
6664		Attrs: attrs,
6665	}
6666	op := scope.AddOperation(opspec)
6667	if scope.Err() != nil {
6668		return
6669	}
6670	var idx int
6671	var err error
6672	if values, idx, err = makeOutputList(op, idx, "values"); err != nil {
6673		scope.UpdateErr("MapUnstage", err)
6674		return
6675	}
6676	return values
6677}
6678
6679// StageSizeAttr is an optional argument to StageSize.
6680type StageSizeAttr func(optionalAttr)
6681
6682// StageSizeCapacity sets the optional capacity attribute to value.
6683// If not specified, defaults to 0
6684//
6685// REQUIRES: value >= 0
6686func StageSizeCapacity(value int64) StageSizeAttr {
6687	return func(m optionalAttr) {
6688		m["capacity"] = value
6689	}
6690}
6691
6692// StageSizeMemoryLimit sets the optional memory_limit attribute to value.
6693// If not specified, defaults to 0
6694//
6695// REQUIRES: value >= 0
6696func StageSizeMemoryLimit(value int64) StageSizeAttr {
6697	return func(m optionalAttr) {
6698		m["memory_limit"] = value
6699	}
6700}
6701
6702// StageSizeContainer sets the optional container attribute to value.
6703// If not specified, defaults to ""
6704func StageSizeContainer(value string) StageSizeAttr {
6705	return func(m optionalAttr) {
6706		m["container"] = value
6707	}
6708}
6709
6710// StageSizeSharedName sets the optional shared_name attribute to value.
6711// If not specified, defaults to ""
6712func StageSizeSharedName(value string) StageSizeAttr {
6713	return func(m optionalAttr) {
6714		m["shared_name"] = value
6715	}
6716}
6717
6718// Op returns the number of elements in the underlying container.
6719func StageSize(scope *Scope, dtypes []tf.DataType, optional ...StageSizeAttr) (size tf.Output) {
6720	if scope.Err() != nil {
6721		return
6722	}
6723	attrs := map[string]interface{}{"dtypes": dtypes}
6724	for _, a := range optional {
6725		a(attrs)
6726	}
6727	opspec := tf.OpSpec{
6728		Type: "StageSize",
6729
6730		Attrs: attrs,
6731	}
6732	op := scope.AddOperation(opspec)
6733	return op.Output(0)
6734}
6735
6736// StagePeekAttr is an optional argument to StagePeek.
6737type StagePeekAttr func(optionalAttr)
6738
6739// StagePeekCapacity sets the optional capacity attribute to value.
6740// If not specified, defaults to 0
6741//
6742// REQUIRES: value >= 0
6743func StagePeekCapacity(value int64) StagePeekAttr {
6744	return func(m optionalAttr) {
6745		m["capacity"] = value
6746	}
6747}
6748
6749// StagePeekMemoryLimit sets the optional memory_limit attribute to value.
6750// If not specified, defaults to 0
6751//
6752// REQUIRES: value >= 0
6753func StagePeekMemoryLimit(value int64) StagePeekAttr {
6754	return func(m optionalAttr) {
6755		m["memory_limit"] = value
6756	}
6757}
6758
6759// StagePeekContainer sets the optional container attribute to value.
6760// If not specified, defaults to ""
6761func StagePeekContainer(value string) StagePeekAttr {
6762	return func(m optionalAttr) {
6763		m["container"] = value
6764	}
6765}
6766
6767// StagePeekSharedName sets the optional shared_name attribute to value.
6768// If not specified, defaults to ""
6769func StagePeekSharedName(value string) StagePeekAttr {
6770	return func(m optionalAttr) {
6771		m["shared_name"] = value
6772	}
6773}
6774
6775// Op peeks at the values at the specified index.  If the
6776//
6777// underlying container does not contain sufficient elements,
6778// this op will block until it does.   This Op is optimized for
6779// performance.
6780func StagePeek(scope *Scope, index tf.Output, dtypes []tf.DataType, optional ...StagePeekAttr) (values []tf.Output) {
6781	if scope.Err() != nil {
6782		return
6783	}
6784	attrs := map[string]interface{}{"dtypes": dtypes}
6785	for _, a := range optional {
6786		a(attrs)
6787	}
6788	opspec := tf.OpSpec{
6789		Type: "StagePeek",
6790		Input: []tf.Input{
6791			index,
6792		},
6793		Attrs: attrs,
6794	}
6795	op := scope.AddOperation(opspec)
6796	if scope.Err() != nil {
6797		return
6798	}
6799	var idx int
6800	var err error
6801	if values, idx, err = makeOutputList(op, idx, "values"); err != nil {
6802		scope.UpdateErr("StagePeek", err)
6803		return
6804	}
6805	return values
6806}
6807
6808// UnstageAttr is an optional argument to Unstage.
6809type UnstageAttr func(optionalAttr)
6810
6811// UnstageCapacity sets the optional capacity attribute to value.
6812// If not specified, defaults to 0
6813//
6814// REQUIRES: value >= 0
6815func UnstageCapacity(value int64) UnstageAttr {
6816	return func(m optionalAttr) {
6817		m["capacity"] = value
6818	}
6819}
6820
6821// UnstageMemoryLimit sets the optional memory_limit attribute to value.
6822// If not specified, defaults to 0
6823//
6824// REQUIRES: value >= 0
6825func UnstageMemoryLimit(value int64) UnstageAttr {
6826	return func(m optionalAttr) {
6827		m["memory_limit"] = value
6828	}
6829}
6830
6831// UnstageContainer sets the optional container attribute to value.
6832// If not specified, defaults to ""
6833func UnstageContainer(value string) UnstageAttr {
6834	return func(m optionalAttr) {
6835		m["container"] = value
6836	}
6837}
6838
6839// UnstageSharedName sets the optional shared_name attribute to value.
6840// If not specified, defaults to ""
6841func UnstageSharedName(value string) UnstageAttr {
6842	return func(m optionalAttr) {
6843		m["shared_name"] = value
6844	}
6845}
6846
6847// Op is similar to a lightweight Dequeue.
6848//
6849// The basic functionality is similar to dequeue with many fewer
6850// capabilities and options.  This Op is optimized for performance.
6851func Unstage(scope *Scope, dtypes []tf.DataType, optional ...UnstageAttr) (values []tf.Output) {
6852	if scope.Err() != nil {
6853		return
6854	}
6855	attrs := map[string]interface{}{"dtypes": dtypes}
6856	for _, a := range optional {
6857		a(attrs)
6858	}
6859	opspec := tf.OpSpec{
6860		Type: "Unstage",
6861
6862		Attrs: attrs,
6863	}
6864	op := scope.AddOperation(opspec)
6865	if scope.Err() != nil {
6866		return
6867	}
6868	var idx int
6869	var err error
6870	if values, idx, err = makeOutputList(op, idx, "values"); err != nil {
6871		scope.UpdateErr("Unstage", err)
6872		return
6873	}
6874	return values
6875}
6876
6877// StageAttr is an optional argument to Stage.
6878type StageAttr func(optionalAttr)
6879
6880// StageCapacity sets the optional capacity attribute to value.
6881//
6882// value: Maximum number of elements in the Staging Area. If > 0, inserts
6883// on the container will block when the capacity is reached.
6884// If not specified, defaults to 0
6885//
6886// REQUIRES: value >= 0
6887func StageCapacity(value int64) StageAttr {
6888	return func(m optionalAttr) {
6889		m["capacity"] = value
6890	}
6891}
6892
6893// StageMemoryLimit sets the optional memory_limit attribute to value.
6894//
6895// value: The maximum number of bytes allowed for Tensors in the Staging Area.
6896// If > 0, inserts will block until sufficient space is available.
6897// If not specified, defaults to 0
6898//
6899// REQUIRES: value >= 0
6900func StageMemoryLimit(value int64) StageAttr {
6901	return func(m optionalAttr) {
6902		m["memory_limit"] = value
6903	}
6904}
6905
6906// StageContainer sets the optional container attribute to value.
6907//
6908// value: If non-empty, this queue is placed in the given container. Otherwise,
6909// a default container is used.
6910// If not specified, defaults to ""
6911func StageContainer(value string) StageAttr {
6912	return func(m optionalAttr) {
6913		m["container"] = value
6914	}
6915}
6916
6917// StageSharedName sets the optional shared_name attribute to value.
6918//
6919// value: This name must match the shared_name of the corresponding Unstage Op.
6920// If not specified, defaults to ""
6921func StageSharedName(value string) StageAttr {
6922	return func(m optionalAttr) {
6923		m["shared_name"] = value
6924	}
6925}
6926
6927// Stage values similar to a lightweight Enqueue.
6928//
6929// The basic functionality of this Op is similar to a queue with many
6930// fewer capabilities and options.  This Op is optimized for performance.
6931//
6932// Arguments:
6933//	values: A list of tensors.
6934//	dtypes: A list of data types that inserted values should adhere to.
6935//
6936// Returns the created operation.
6937func Stage(scope *Scope, values []tf.Output, optional ...StageAttr) (o *tf.Operation) {
6938	if scope.Err() != nil {
6939		return
6940	}
6941	attrs := map[string]interface{}{}
6942	for _, a := range optional {
6943		a(attrs)
6944	}
6945	opspec := tf.OpSpec{
6946		Type: "Stage",
6947		Input: []tf.Input{
6948			tf.OutputList(values),
6949		},
6950		Attrs: attrs,
6951	}
6952	return scope.AddOperation(opspec)
6953}
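// Editorial example (not generated): a sketch pairing Stage with Unstage via
// a common shared_name, the lightweight enqueue/dequeue pattern described
// above. The shared_name and the staged value are illustrative.
func exampleStageUnstage() {
	s := NewScope()
	v := Const(s.SubScope("v"), int64(7))
	stage := Stage(s.SubScope("producer"), []tf.Output{v}, StageSharedName("buffer"))
	_ = stage
	values := Unstage(s.SubScope("consumer"), []tf.DataType{tf.Int64}, UnstageSharedName("buffer"))
	_ = values
}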
6954
6955// Delete the tensor specified by its handle in the session.
6956//
6957// Arguments:
6958//	handle: The handle for a tensor stored in the session state.
6959//
6960// Returns the created operation.
6961func DeleteSessionTensor(scope *Scope, handle tf.Output) (o *tf.Operation) {
6962	if scope.Err() != nil {
6963		return
6964	}
6965	opspec := tf.OpSpec{
6966		Type: "DeleteSessionTensor",
6967		Input: []tf.Input{
6968			handle,
6969		},
6970	}
6971	return scope.AddOperation(opspec)
6972}
6973
6974// Store the input tensor in the state of the current session.
6975//
6976// Arguments:
6977//	value: The tensor to be stored.
6978//
6979// Returns The handle for the tensor stored in the session state, represented
6980// as a ResourceHandle object.
6981func GetSessionHandleV2(scope *Scope, value tf.Output) (handle tf.Output) {
6982	if scope.Err() != nil {
6983		return
6984	}
6985	opspec := tf.OpSpec{
6986		Type: "GetSessionHandleV2",
6987		Input: []tf.Input{
6988			value,
6989		},
6990	}
6991	op := scope.AddOperation(opspec)
6992	return op.Output(0)
6993}
6994
6995// Store the input tensor in the state of the current session.
6996//
6997// Arguments:
6998//	value: The tensor to be stored.
6999//
7000// Returns The handle for the tensor stored in the session state, represented
7001// as a string.
7002func GetSessionHandle(scope *Scope, value tf.Output) (handle tf.Output) {
7003	if scope.Err() != nil {
7004		return
7005	}
7006	opspec := tf.OpSpec{
7007		Type: "GetSessionHandle",
7008		Input: []tf.Input{
7009			value,
7010		},
7011	}
7012	op := scope.AddOperation(opspec)
7013	return op.Output(0)
7014}
7015
7016// Copy a tensor setting everything outside a central band in each innermost matrix to zero.
7017//
7018// The `band` part is computed as follows:
7019// Assume `input` has `k` dimensions `[I, J, K, ..., M, N]`, then the output is a
7020// tensor with the same shape where
7021//
7022// `band[i, j, k, ..., m, n] = in_band(m, n) * input[i, j, k, ..., m, n]`.
7023//
7024// The indicator function
7025//
7026// `in_band(m, n) = (num_lower < 0 || (m-n) <= num_lower) &&
7027//                  (num_upper < 0 || (n-m) <= num_upper)`.
7028//
7029// For example:
7030//
7031// ```
7032// # if 'input' is [[ 0,  1,  2, 3]
7033// #                [-1,  0,  1, 2]
7034// #                [-2, -1,  0, 1]
7035// #                [-3, -2, -1, 0]],
7036//
7037// tf.matrix_band_part(input, 1, -1) ==> [[ 0,  1,  2, 3]
7038//                                        [-1,  0,  1, 2]
7039//                                        [ 0, -1,  0, 1]
7040//                                        [ 0,  0, -1, 0]],
7041//
7042// tf.matrix_band_part(input, 2, 1) ==> [[ 0,  1,  0, 0]
7043//                                       [-1,  0,  1, 0]
7044//                                       [-2, -1,  0, 1]
7045//                                       [ 0, -2, -1, 0]]
7046// ```
7047//
7048// Useful special cases:
7049//
7050// ```
7051//  tf.matrix_band_part(input, 0, -1) ==> Upper triangular part.
7052//  tf.matrix_band_part(input, -1, 0) ==> Lower triangular part.
7053//  tf.matrix_band_part(input, 0, 0) ==> Diagonal.
7054// ```
7055//
7056// Arguments:
7057//	input: Rank `k` tensor.
7058//	num_lower: 0-D tensor. Number of subdiagonals to keep. If negative, keep entire
7059// lower triangle.
7060//	num_upper: 0-D tensor. Number of superdiagonals to keep. If negative, keep
7061// entire upper triangle.
7062//
7063// Returns Rank `k` tensor of the same shape as input. The extracted banded tensor.
7064func MatrixBandPart(scope *Scope, input tf.Output, num_lower tf.Output, num_upper tf.Output) (band tf.Output) {
7065	if scope.Err() != nil {
7066		return
7067	}
7068	opspec := tf.OpSpec{
7069		Type: "MatrixBandPart",
7070		Input: []tf.Input{
7071			input, num_lower, num_upper,
7072		},
7073	}
7074	op := scope.AddOperation(opspec)
7075	return op.Output(0)
7076}
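// Editorial example (not generated): the doc comment's band extraction with
// num_lower=1 and num_upper=-1, built with this package's Const wrapper.
func exampleMatrixBandPart() {
	s := NewScope()
	m := Const(s.SubScope("input"), [][]int32{
		{0, 1, 2, 3}, {-1, 0, 1, 2}, {-2, -1, 0, 1}, {-3, -2, -1, 0},
	})
	numLower := Const(s.SubScope("num_lower"), int64(1))  // keep one subdiagonal
	numUpper := Const(s.SubScope("num_upper"), int64(-1)) // keep the full upper triangle
	band := MatrixBandPart(s, m, numLower, numUpper)
	_ = band
}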
7077
7078// ListDiffAttr is an optional argument to ListDiff.
7079type ListDiffAttr func(optionalAttr)
7080
7081// ListDiffOutIdx sets the optional out_idx attribute to value.
7082// If not specified, defaults to DT_INT32
7083func ListDiffOutIdx(value tf.DataType) ListDiffAttr {
7084	return func(m optionalAttr) {
7085		m["out_idx"] = value
7086	}
7087}
7088
7089// Computes the difference between two lists of numbers or strings.
7090//
7091// Given a list `x` and a list `y`, this operation returns a list `out` that
7092// represents all values that are in `x` but not in `y`. The returned list `out`
7093// is sorted in the same order that the numbers appear in `x` (duplicates are
7094// preserved). This operation also returns a list `idx` that represents the
7095// position of each `out` element in `x`. In other words:
7096//
7097// `out[i] = x[idx[i]] for i in [0, 1, ..., len(out) - 1]`
7098//
7099// For example, given this input:
7100//
7101// ```
7102// x = [1, 2, 3, 4, 5, 6]
7103// y = [1, 3, 5]
7104// ```
7105//
7106// This operation would return:
7107//
7108// ```
7109// out ==> [2, 4, 6]
7110// idx ==> [1, 3, 5]
7111// ```
7112//
7113// Arguments:
7114//	x: 1-D. Values to keep.
7115//	y: 1-D. Values to remove.
7116//
7117// Returns:
7118//	out: 1-D. Values present in `x` but not in `y`.
7119//	idx: 1-D. Positions of `x` values preserved in `out`.
7120func ListDiff(scope *Scope, x tf.Output, y tf.Output, optional ...ListDiffAttr) (out tf.Output, idx tf.Output) {
7121	if scope.Err() != nil {
7122		return
7123	}
7124	attrs := map[string]interface{}{}
7125	for _, a := range optional {
7126		a(attrs)
7127	}
7128	opspec := tf.OpSpec{
7129		Type: "ListDiff",
7130		Input: []tf.Input{
7131			x, y,
7132		},
7133		Attrs: attrs,
7134	}
7135	op := scope.AddOperation(opspec)
7136	return op.Output(0), op.Output(1)
7137}
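// Editorial example (not generated): the doc comment's ListDiff example as a
// graph sketch; per the doc above, out is [2, 4, 6] and idx is [1, 3, 5].
func exampleListDiff() {
	s := NewScope()
	x := Const(s.SubScope("x"), []int32{1, 2, 3, 4, 5, 6})
	y := Const(s.SubScope("y"), []int32{1, 3, 5})
	out, idx := ListDiff(s, x, y)
	_, _ = out, idx
}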
7138
7139// Deprecated. Use TensorArrayScatterV3
7140//
7141// DEPRECATED at GraphDef version 26: Use TensorArrayScatterV3
7142func TensorArrayScatterV2(scope *Scope, handle tf.Output, indices tf.Output, value tf.Output, flow_in tf.Output) (flow_out tf.Output) {
7143	if scope.Err() != nil {
7144		return
7145	}
7146	opspec := tf.OpSpec{
7147		Type: "TensorArrayScatterV2",
7148		Input: []tf.Input{
7149			handle, indices, value, flow_in,
7150		},
7151	}
7152	op := scope.AddOperation(opspec)
7153	return op.Output(0)
7154}
7155
7156// Deprecated. Use TensorArrayReadV3
7157//
7158// DEPRECATED at GraphDef version 26: Use TensorArrayReadV3
7159func TensorArrayReadV2(scope *Scope, handle tf.Output, index tf.Output, flow_in tf.Output, dtype tf.DataType) (value tf.Output) {
7160	if scope.Err() != nil {
7161		return
7162	}
7163	attrs := map[string]interface{}{"dtype": dtype}
7164	opspec := tf.OpSpec{
7165		Type: "TensorArrayReadV2",
7166		Input: []tf.Input{
7167			handle, index, flow_in,
7168		},
7169		Attrs: attrs,
7170	}
7171	op := scope.AddOperation(opspec)
7172	return op.Output(0)
7173}
7174
7175// Deprecated. Use TensorArrayGradV3
7176//
7177// DEPRECATED at GraphDef version 26: Use TensorArrayGradV3
7178func TensorArrayGradV2(scope *Scope, handle tf.Output, flow_in tf.Output, source string) (grad_handle tf.Output) {
7179	if scope.Err() != nil {
7180		return
7181	}
7182	attrs := map[string]interface{}{"source": source}
7183	opspec := tf.OpSpec{
7184		Type: "TensorArrayGradV2",
7185		Input: []tf.Input{
7186			handle, flow_in,
7187		},
7188		Attrs: attrs,
7189	}
7190	op := scope.AddOperation(opspec)
7191	return op.Output(0)
7192}
7193
7194// Get the current size of the TensorArray.
7195//
7196// Arguments:
7197//	handle: The handle to a TensorArray (output of TensorArray or TensorArrayGrad).
7198//	flow_in: A float scalar that enforces proper chaining of operations.
7199//
7200// Returns The current size of the TensorArray.
7201func TensorArraySizeV3(scope *Scope, handle tf.Output, flow_in tf.Output) (size tf.Output) {
7202	if scope.Err() != nil {
7203		return
7204	}
7205	opspec := tf.OpSpec{
7206		Type: "TensorArraySizeV3",
7207		Input: []tf.Input{
7208			handle, flow_in,
7209		},
7210	}
7211	op := scope.AddOperation(opspec)
7212	return op.Output(0)
7213}
7214
7215// Split the data from the input value into TensorArray elements.
7216//
7217// Assuming that `lengths` takes on values
7218//
7219//   ```(n0, n1, ..., n(T-1))```
7220//
7221// and that `value` has shape
7222//
7223//   ```(n0 + n1 + ... + n(T-1) x d0 x d1 x ...)```,
7224//
7225// this splits values into a TensorArray with T tensors.
7226//
7227// TensorArray index t will be the subtensor of values with starting position
7228//
7229//   ```(n0 + n1 + ... + n(t-1), 0, 0, ...)```
7230//
7231// and having size
7232//
7233//   ```nt x d0 x d1 x ...```
7234//
7235// Arguments:
7236//	handle: The handle to a TensorArray.
7237//	value: The concatenated tensor to write to the TensorArray.
7238//	lengths: The vector of lengths, how to split the rows of value into the
7239// TensorArray.
7240//	flow_in: A float scalar that enforces proper chaining of operations.
7241//
7242// Returns A float scalar that enforces proper chaining of operations.
7243func TensorArraySplitV3(scope *Scope, handle tf.Output, value tf.Output, lengths tf.Output, flow_in tf.Output) (flow_out tf.Output) {
7244	if scope.Err() != nil {
7245		return
7246	}
7247	opspec := tf.OpSpec{
7248		Type: "TensorArraySplitV3",
7249		Input: []tf.Input{
7250			handle, value, lengths, flow_in,
7251		},
7252	}
7253	op := scope.AddOperation(opspec)
7254	return op.Output(0)
7255}
7256
7257// TensorArrayConcatV3Attr is an optional argument to TensorArrayConcatV3.
7258type TensorArrayConcatV3Attr func(optionalAttr)
7259
7260// TensorArrayConcatV3ElementShapeExcept0 sets the optional element_shape_except0 attribute to value.
7261//
7262// value: The expected shape of an element, if known,
7263// excluding the first dimension. Used to validate the shapes of
7264// TensorArray elements. If this shape is not fully specified, concatenating
7265// zero-size TensorArrays is an error.
7266// If not specified, defaults to a shape with unknown rank.
7267func TensorArrayConcatV3ElementShapeExcept0(value tf.Shape) TensorArrayConcatV3Attr {
7268	return func(m optionalAttr) {
7269		m["element_shape_except0"] = value
7270	}
7271}
7272
7273// Concat the elements from the TensorArray into value `value`.
7274//
7275// Takes `T` elements of shapes
7276//
7277//   ```
7278//   (n0 x d0 x d1 x ...), (n1 x d0 x d1 x ...), ..., (n(T-1) x d0 x d1 x ...)
7279//   ```
7280//
7281// and concatenates them into a Tensor of shape:
7282//
7283//   ```(n0 + n1 + ... + n(T-1) x d0 x d1 x ...)```
7284//
7285// All elements must have the same shape (excepting the first dimension).
7286//
7287// Arguments:
7288//	handle: The handle to a TensorArray.
7289//	flow_in: A float scalar that enforces proper chaining of operations.
7290//	dtype: The type of the element that is returned.
7291//
7292// Returns:
7293//	value: All of the elements in the TensorArray, concatenated along the first
7294// axis.
7295//	lengths: A vector of the row sizes of the original T elements in the
7296// value output.  In the example above, this would be the values:
7297// `(n0, n1, ..., n(T-1))`.
7298func TensorArrayConcatV3(scope *Scope, handle tf.Output, flow_in tf.Output, dtype tf.DataType, optional ...TensorArrayConcatV3Attr) (value tf.Output, lengths tf.Output) {
7299	if scope.Err() != nil {
7300		return
7301	}
7302	attrs := map[string]interface{}{"dtype": dtype}
7303	for _, a := range optional {
7304		a(attrs)
7305	}
7306	opspec := tf.OpSpec{
7307		Type: "TensorArrayConcatV3",
7308		Input: []tf.Input{
7309			handle, flow_in,
7310		},
7311		Attrs: attrs,
7312	}
7313	op := scope.AddOperation(opspec)
7314	return op.Output(0), op.Output(1)
7315}
7316
7317// TensorArrayGatherV3Attr is an optional argument to TensorArrayGatherV3.
7318type TensorArrayGatherV3Attr func(optionalAttr)
7319
7320// TensorArrayGatherV3ElementShape sets the optional element_shape attribute to value.
7321//
7322// value: The expected shape of an element, if known. Used to
7323// validate the shapes of TensorArray elements. If this shape is not
7324// fully specified, gathering zero-size TensorArrays is an error.
7325// If not specified, defaults to a shape with unknown rank.
7326func TensorArrayGatherV3ElementShape(value tf.Shape) TensorArrayGatherV3Attr {
7327	return func(m optionalAttr) {
7328		m["element_shape"] = value
7329	}
7330}
7331
7332// Gather specific elements from the TensorArray into output `value`.
7333//
7334// All elements selected by `indices` must have the same shape.
7335//
7336// Arguments:
7337//	handle: The handle to a TensorArray.
7338//	indices: The locations in the TensorArray from which to read tensor elements.
7339//	flow_in: A float scalar that enforces proper chaining of operations.
7340//	dtype: The type of the element that is returned.
7341//
7342// Returns All of the elements in the TensorArray, concatenated along a new
7343// axis (the new dimension 0).
7344func TensorArrayGatherV3(scope *Scope, handle tf.Output, indices tf.Output, flow_in tf.Output, dtype tf.DataType, optional ...TensorArrayGatherV3Attr) (value tf.Output) {
7345	if scope.Err() != nil {
7346		return
7347	}
7348	attrs := map[string]interface{}{"dtype": dtype}
7349	for _, a := range optional {
7350		a(attrs)
7351	}
7352	opspec := tf.OpSpec{
7353		Type: "TensorArrayGatherV3",
7354		Input: []tf.Input{
7355			handle, indices, flow_in,
7356		},
7357		Attrs: attrs,
7358	}
7359	op := scope.AddOperation(opspec)
7360	return op.Output(0)
7361}
7362
7363// GatherAttr is an optional argument to Gather.
7364type GatherAttr func(optionalAttr)
7365
7366// GatherValidateIndices sets the optional validate_indices attribute to value.
7367// If not specified, defaults to true
7368func GatherValidateIndices(value bool) GatherAttr {
7369	return func(m optionalAttr) {
7370		m["validate_indices"] = value
7371	}
7372}
7373
7374// Gather slices from `params` according to `indices`.
7375//
7376// `indices` must be an integer tensor of any dimension (usually 0-D or 1-D).
7377// Produces an output tensor with shape `indices.shape + params.shape[1:]` where:
7378//
7379// ```python
7380//     # Scalar indices
7381//     output[:, ..., :] = params[indices, :, ..., :]
7382//
7383//     # Vector indices
7384//     output[i, :, ..., :] = params[indices[i], :, ..., :]
7385//
7386//     # Higher rank indices
7387//     output[i, ..., j, :, ..., :] = params[indices[i, ..., j], :, ..., :]
7388// ```
7389//
7390// If `indices` is a permutation and `len(indices) == params.shape[0]` then
7391// this operation will permute `params` accordingly.
7392//
7393// `validate_indices`: DEPRECATED. If this operation is assigned to CPU, values in
7394// `indices` are always validated to be within range. If assigned to GPU,
7395// out-of-bound indices result in safe but unspecified behavior, which may include
7396// raising an error.
7397//
7398// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
7399// <img style="width:100%" src="https://www.tensorflow.org/images/Gather.png" alt>
7400// </div>
7401func Gather(scope *Scope, params tf.Output, indices tf.Output, optional ...GatherAttr) (output tf.Output) {
7402	if scope.Err() != nil {
7403		return
7404	}
7405	attrs := map[string]interface{}{}
7406	for _, a := range optional {
7407		a(attrs)
7408	}
7409	opspec := tf.OpSpec{
7410		Type: "Gather",
7411		Input: []tf.Input{
7412			params, indices,
7413		},
7414		Attrs: attrs,
7415	}
7416	op := scope.AddOperation(opspec)
7417	return op.Output(0)
7418}
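
// Example (hand-written, not generated): a minimal sketch of calling the
// Gather wrapper, assuming the Const helper defined elsewhere in this
// package. It gathers rows 2 and 0 of a small matrix, with the deprecated
// index validation explicitly disabled via the optional attribute.
func exampleGather(scope *Scope) tf.Output {
	params := Const(scope.SubScope("params"), [][]float32{{1, 2}, {3, 4}, {5, 6}})
	indices := Const(scope.SubScope("indices"), []int32{2, 0})
	// The result has shape indices.shape + params.shape[1:] = [2, 2]:
	// [[5, 6], [1, 2]].
	return Gather(scope, params, indices, GatherValidateIndices(false))
}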

// Read an element from the TensorArray into output `value`.
//
// Arguments:
//	handle: The handle to a TensorArray.
//	index: The position to read from inside the TensorArray.
//	flow_in: A float scalar that enforces proper chaining of operations.
//	dtype: The type of the element that is returned.
//
// Returns The tensor that is read from the TensorArray.
func TensorArrayReadV3(scope *Scope, handle tf.Output, index tf.Output, flow_in tf.Output, dtype tf.DataType) (value tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"dtype": dtype}
	opspec := tf.OpSpec{
		Type: "TensorArrayReadV3",
		Input: []tf.Input{
			handle, index, flow_in,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Writes an element to the TensorArray at the given index.
//
// Arguments:
//	handle: The handle to a TensorArray.
//	index: The position to write to inside the TensorArray.
//	value: The tensor to write to the TensorArray.
//	flow_in: A float scalar that enforces proper chaining of operations.
//
// Returns A float scalar that enforces proper chaining of operations.
func TensorArrayWriteV3(scope *Scope, handle tf.Output, index tf.Output, value tf.Output, flow_in tf.Output) (flow_out tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "TensorArrayWriteV3",
		Input: []tf.Input{
			handle, index, value, flow_in,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
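
// Example (hand-written, not generated): a minimal sketch of chaining
// TensorArray ops through their flow outputs. It assumes the TensorArrayV3
// and Const wrappers defined elsewhere in this package. Feeding the write's
// flow_out into the read guarantees the read executes after the write.
func exampleTensorArrayWriteRead(scope *Scope) tf.Output {
	size := Const(scope.SubScope("size"), int32(2))
	handle, flow := TensorArrayV3(scope, size, tf.Float)
	idx := Const(scope.SubScope("idx"), int32(0))
	val := Const(scope.SubScope("val"), float32(3.5))
	flowOut := TensorArrayWriteV3(scope, handle, idx, val, flow)
	// Reads back the value written above, ordered by the flow dependency.
	return TensorArrayReadV3(scope, handle, idx, flowOut, tf.Float)
}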

// Creates a TensorArray for storing multiple gradients of values in the given handle.
//
// Similar to TensorArrayGradV3. However, it creates an accumulator with an
// expanded shape compared to the input TensorArray whose gradient is being
// computed. This enables multiple gradients for the same TensorArray to be
// calculated using the same accumulator.
//
// Arguments:
//	handle: The handle to the forward TensorArray.
//	flow_in: A float scalar that enforces proper chaining of operations.
//	shape_to_prepend: An int32 vector representing a shape. Elements in the gradient accumulator will
// have a shape which is this shape_to_prepend value concatenated with the shape of the
// elements in the TensorArray corresponding to the input handle.
//	source: The gradient source string, used to decide which gradient TensorArray
// to return.
func TensorArrayGradWithShape(scope *Scope, handle tf.Output, flow_in tf.Output, shape_to_prepend tf.Output, source string) (grad_handle tf.Output, flow_out tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"source": source}
	opspec := tf.OpSpec{
		Type: "TensorArrayGradWithShape",
		Input: []tf.Input{
			handle, flow_in, shape_to_prepend,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1)
}

// Delete the stack from its resource container.
//
// Arguments:
//	handle: The handle to a stack.
//
// Returns the created operation.
func StackCloseV2(scope *Scope, handle tf.Output) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "StackCloseV2",
		Input: []tf.Input{
			handle,
		},
	}
	return scope.AddOperation(opspec)
}

// Pop the element at the top of the stack.
//
// Arguments:
//	handle: The handle to a stack.
//	elem_type: The type of the element that is popped.
//
// Returns The tensor that is popped from the top of the stack.
func StackPopV2(scope *Scope, handle tf.Output, elem_type tf.DataType) (elem tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"elem_type": elem_type}
	opspec := tf.OpSpec{
		Type: "StackPopV2",
		Input: []tf.Input{
			handle,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// StackPushV2Attr is an optional argument to StackPushV2.
type StackPushV2Attr func(optionalAttr)

// StackPushV2SwapMemory sets the optional swap_memory attribute to value.
//
// value: Swap `elem` to CPU.
// If not specified, defaults to false
func StackPushV2SwapMemory(value bool) StackPushV2Attr {
	return func(m optionalAttr) {
		m["swap_memory"] = value
	}
}

// Push an element onto the stack.
//
// Arguments:
//	handle: The handle to a stack.
//	elem: The tensor to be pushed onto the stack.
//
// Returns The same tensor as the input 'elem'.
func StackPushV2(scope *Scope, handle tf.Output, elem tf.Output, optional ...StackPushV2Attr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "StackPushV2",
		Input: []tf.Input{
			handle, elem,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// StackV2Attr is an optional argument to StackV2.
type StackV2Attr func(optionalAttr)

// StackV2StackName sets the optional stack_name attribute to value.
//
// value: Overrides the name used for the temporary stack resource. Default
// value is the name of the 'Stack' op (which is guaranteed unique).
// If not specified, defaults to ""
func StackV2StackName(value string) StackV2Attr {
	return func(m optionalAttr) {
		m["stack_name"] = value
	}
}

// A stack that produces elements in first-in last-out order.
//
// Arguments:
//	max_size: The maximum size of the stack if non-negative. If negative, the stack
// size is unlimited.
//	elem_type: The type of the elements on the stack.
//
// Returns The handle to the stack.
func StackV2(scope *Scope, max_size tf.Output, elem_type tf.DataType, optional ...StackV2Attr) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"elem_type": elem_type}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "StackV2",
		Input: []tf.Input{
			max_size,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
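
// Example (hand-written, not generated): a minimal sketch of the stack ops,
// assuming the Const helper defined elsewhere in this package. Because
// StackPopV2 has no data dependency on the push, the sketch orders the pop
// after the push with a control dependency.
func exampleStack(scope *Scope) tf.Output {
	maxSize := Const(scope.SubScope("max_size"), int32(-1)) // negative: unlimited
	handle := StackV2(scope, maxSize, tf.Float, StackV2StackName("example_stack"))
	elem := Const(scope.SubScope("elem"), float32(1))
	pushed := StackPushV2(scope, handle, elem)
	// Run the pop only after the push has executed.
	popScope := scope.WithControlDependencies(pushed.Op)
	return StackPopV2(popScope, handle, tf.Float)
}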

// Checks a tensor for NaN, -Inf and +Inf values.
//
// When run, reports an `InvalidArgument` error if `tensor` has any values
// that are not a number (NaN) or infinity (Inf). Otherwise, passes `tensor` as-is.
// Unlike CheckNumerics (V1), CheckNumericsV2 distinguishes -Inf and +Inf in the
// errors it throws.
//
// Arguments:
//
//	message: Prefix of the error message.
func CheckNumericsV2(scope *Scope, tensor tf.Output, message string) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"message": message}
	opspec := tf.OpSpec{
		Type: "CheckNumericsV2",
		Input: []tf.Input{
			tensor,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
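
// Example (hand-written, not generated): a minimal sketch that guards a
// tensor against NaN/Inf before further use, assuming the Const helper
// defined elsewhere in this package.
func exampleCheckNumerics(scope *Scope) tf.Output {
	x := Const(scope.SubScope("x"), []float32{1, 2, 3})
	// Passes x through unchanged; fails with the given message prefix if x
	// ever contains NaN, -Inf or +Inf.
	return CheckNumericsV2(scope, x, "x contains bad values: ")
}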

// Applies a gradient to a given accumulator.
//
// Does not add if local_step is less than the accumulator's global_step.
//
// Arguments:
//	handle: The handle to an accumulator.
//	local_step: The local_step value at which the gradient was computed.
//	gradient: A tensor of the gradient to be accumulated.
//
// Returns the created operation.
func ResourceAccumulatorApplyGradient(scope *Scope, handle tf.Output, local_step tf.Output, gradient tf.Output) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "ResourceAccumulatorApplyGradient",
		Input: []tf.Input{
			handle, local_step, gradient,
		},
	}
	return scope.AddOperation(opspec)
}

// Wraps the XLA Pad operator, documented at
//
//  https://www.tensorflow.org/performance/xla/operation_semantics#pad
// .
//
// Arguments:
//	input: A `Tensor` of type T.
//	padding_value: A scalar `Tensor` of type T.
//	padding_low: the padding to apply at the start of each input dimension. Must
// be a compile-time constant 1D tensor of length equal to rank of input.
//	padding_high: the padding to apply at the end of each input dimension. Must
// be a compile-time constant 1D tensor of length equal to rank of input.
//	padding_interior: the padding to apply between each input element. Must
// be a compile-time constant 1D tensor of length equal to rank of input,
// containing only non-negative values.
//
// Returns A `Tensor` of type T.
func XlaPad(scope *Scope, input tf.Output, padding_value tf.Output, padding_low tf.Output, padding_high tf.Output, padding_interior tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "XlaPad",
		Input: []tf.Input{
			input, padding_value, padding_low, padding_high, padding_interior,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Updates the accumulator with a new value for global_step.
//
// Logs a warning if the accumulator's value is already higher than
// new_global_step.
//
// Arguments:
//	handle: The handle to an accumulator.
//	new_global_step: The new global_step value to set.
//
// Returns the created operation.
func ResourceAccumulatorSetGlobalStep(scope *Scope, handle tf.Output, new_global_step tf.Output) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "ResourceAccumulatorSetGlobalStep",
		Input: []tf.Input{
			handle, new_global_step,
		},
	}
	return scope.AddOperation(opspec)
}

// Computes the number of elements in the given queue.
//
// Arguments:
//	handle: The handle to a queue.
//
// Returns The number of elements in the given queue.
func QueueSizeV2(scope *Scope, handle tf.Output) (size tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "QueueSizeV2",
		Input: []tf.Input{
			handle,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// QueueEnqueueManyV2Attr is an optional argument to QueueEnqueueManyV2.
type QueueEnqueueManyV2Attr func(optionalAttr)

// QueueEnqueueManyV2TimeoutMs sets the optional timeout_ms attribute to value.
//
// value: If the queue is too full, this operation will block for up
// to timeout_ms milliseconds.
// Note: This option is not supported yet.
// If not specified, defaults to -1
func QueueEnqueueManyV2TimeoutMs(value int64) QueueEnqueueManyV2Attr {
	return func(m optionalAttr) {
		m["timeout_ms"] = value
	}
}

// Enqueues zero or more tuples of one or more tensors in the given queue.
//
// This operation slices each component tensor along the 0th dimension to
// make multiple queue elements. All of the tuple components must have the
// same size in the 0th dimension.
//
// The components input has k elements, which correspond to the components of
// tuples stored in the given queue.
//
// N.B. If the queue is full, this operation will block until the given
// elements have been enqueued (or 'timeout_ms' elapses, if specified).
//
// Arguments:
//	handle: The handle to a queue.
//	components: One or more tensors from which the enqueued tensors should
// be taken.
//
// Returns the created operation.
func QueueEnqueueManyV2(scope *Scope, handle tf.Output, components []tf.Output, optional ...QueueEnqueueManyV2Attr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "QueueEnqueueManyV2",
		Input: []tf.Input{
			handle, tf.OutputList(components),
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}

// QueueEnqueueV2Attr is an optional argument to QueueEnqueueV2.
type QueueEnqueueV2Attr func(optionalAttr)

// QueueEnqueueV2TimeoutMs sets the optional timeout_ms attribute to value.
//
// value: If the queue is full, this operation will block for up to
// timeout_ms milliseconds.
// Note: This option is not supported yet.
// If not specified, defaults to -1
func QueueEnqueueV2TimeoutMs(value int64) QueueEnqueueV2Attr {
	return func(m optionalAttr) {
		m["timeout_ms"] = value
	}
}

// Enqueues a tuple of one or more tensors in the given queue.
//
// The components input has k elements, which correspond to the components of
// tuples stored in the given queue.
//
// N.B. If the queue is full, this operation will block until the given
// element has been enqueued (or 'timeout_ms' elapses, if specified).
//
// Arguments:
//	handle: The handle to a queue.
//	components: One or more tensors from which the enqueued tensors should be taken.
//
// Returns the created operation.
func QueueEnqueueV2(scope *Scope, handle tf.Output, components []tf.Output, optional ...QueueEnqueueV2Attr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "QueueEnqueueV2",
		Input: []tf.Input{
			handle, tf.OutputList(components),
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}

// PriorityQueueV2Attr is an optional argument to PriorityQueueV2.
type PriorityQueueV2Attr func(optionalAttr)

// PriorityQueueV2ComponentTypes sets the optional component_types attribute to value.
//
// value: The type of each component in a value.
// If not specified, defaults to an empty list.
//
// REQUIRES: len(value) >= 0
func PriorityQueueV2ComponentTypes(value []tf.DataType) PriorityQueueV2Attr {
	return func(m optionalAttr) {
		m["component_types"] = value
	}
}

// PriorityQueueV2Capacity sets the optional capacity attribute to value.
//
// value: The upper bound on the number of elements in this queue.
// Negative numbers mean no limit.
// If not specified, defaults to -1
func PriorityQueueV2Capacity(value int64) PriorityQueueV2Attr {
	return func(m optionalAttr) {
		m["capacity"] = value
	}
}

// PriorityQueueV2Container sets the optional container attribute to value.
//
// value: If non-empty, this queue is placed in the given container.
// Otherwise, a default container is used.
// If not specified, defaults to ""
func PriorityQueueV2Container(value string) PriorityQueueV2Attr {
	return func(m optionalAttr) {
		m["container"] = value
	}
}

// PriorityQueueV2SharedName sets the optional shared_name attribute to value.
//
// value: If non-empty, this queue will be shared under the given name
// across multiple sessions.
// If not specified, defaults to ""
func PriorityQueueV2SharedName(value string) PriorityQueueV2Attr {
	return func(m optionalAttr) {
		m["shared_name"] = value
	}
}

// A queue that produces elements sorted by the first component value.
//
// Note that the PriorityQueue requires the first component of any element
// to be a scalar int64, in addition to the other elements declared by
// component_types.  Therefore calls to Enqueue and EnqueueMany (resp. Dequeue
// and DequeueMany) on a PriorityQueue will all require (resp. output) one extra
// entry in their input (resp. output) lists.
//
// Arguments:
//	shapes: The shape of each component in a value. The length of this attr must
// be either 0 or the same as the length of component_types. If the length of
// this attr is 0, the shapes of queue elements are not constrained, and
// only one element may be dequeued at a time.
//
// Returns The handle to the queue.
func PriorityQueueV2(scope *Scope, shapes []tf.Shape, optional ...PriorityQueueV2Attr) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"shapes": shapes}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "PriorityQueueV2",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
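
// Example (hand-written, not generated): a minimal sketch of a priority
// queue whose elements carry one float component in addition to the
// mandatory scalar int64 priority, assuming the Const helper defined
// elsewhere in this package. Passing no shapes leaves element shapes
// unconstrained.
func examplePriorityQueue(scope *Scope) *tf.Operation {
	handle := PriorityQueueV2(scope, []tf.Shape{},
		PriorityQueueV2ComponentTypes([]tf.DataType{tf.Float}),
		PriorityQueueV2Capacity(10))
	priority := Const(scope.SubScope("priority"), int64(1))
	value := Const(scope.SubScope("value"), float32(42))
	// The priority travels as the extra first entry of the tuple.
	return QueueEnqueueV2(scope, handle, []tf.Output{priority, value})
}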

// Partitions `data` into `num_partitions` tensors using indices from `partitions`.
//
// For each index tuple `js` of size `partitions.ndim`, the slice `data[js, ...]`
// becomes part of `outputs[partitions[js]]`.  The slices with `partitions[js] = i`
// are placed in `outputs[i]` in lexicographic order of `js`, and the first
// dimension of `outputs[i]` is the number of entries in `partitions` equal to `i`.
// In detail,
//
// ```python
//     outputs[i].shape = [sum(partitions == i)] + data.shape[partitions.ndim:]
//
//     outputs[i] = pack([data[js, ...] for js if partitions[js] == i])
// ```
//
// `data.shape` must start with `partitions.shape`.
//
// For example:
//
// ```python
//     # Scalar partitions.
//     partitions = 1
//     num_partitions = 2
//     data = [10, 20]
//     outputs[0] = []  # Empty with shape [0, 2]
//     outputs[1] = [[10, 20]]
//
//     # Vector partitions.
//     partitions = [0, 0, 1, 1, 0]
//     num_partitions = 2
//     data = [10, 20, 30, 40, 50]
//     outputs[0] = [10, 20, 50]
//     outputs[1] = [30, 40]
// ```
//
// See `dynamic_stitch` for an example on how to merge partitions back.
//
// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
// <img style="width:100%" src="https://www.tensorflow.org/images/DynamicPartition.png" alt>
// </div>
//
// Arguments:
//
//	partitions: Any shape.  Indices in the range `[0, num_partitions)`.
//	num_partitions: The number of partitions to output.
func DynamicPartition(scope *Scope, data tf.Output, partitions tf.Output, num_partitions int64) (outputs []tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_partitions": num_partitions}
	opspec := tf.OpSpec{
		Type: "DynamicPartition",
		Input: []tf.Input{
			data, partitions,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	if scope.Err() != nil {
		return
	}
	var idx int
	var err error
	if outputs, idx, err = makeOutputList(op, idx, "outputs"); err != nil {
		scope.UpdateErr("DynamicPartition", err)
		return
	}
	return outputs
}
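
// Example (hand-written, not generated): a minimal sketch mirroring the
// vector-partition case in the documentation above, assuming the Const
// helper defined elsewhere in this package.
func exampleDynamicPartition(scope *Scope) []tf.Output {
	data := Const(scope.SubScope("data"), []int32{10, 20, 30, 40, 50})
	partitions := Const(scope.SubScope("partitions"), []int32{0, 0, 1, 1, 0})
	// outputs[0] = [10, 20, 50] and outputs[1] = [30, 40].
	return DynamicPartition(scope, data, partitions, 2)
}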

// ResourceConditionalAccumulatorAttr is an optional argument to ResourceConditionalAccumulator.
type ResourceConditionalAccumulatorAttr func(optionalAttr)

// ResourceConditionalAccumulatorContainer sets the optional container attribute to value.
//
// value: If non-empty, this accumulator is placed in the given container.
// Otherwise, a default container is used.
// If not specified, defaults to ""
func ResourceConditionalAccumulatorContainer(value string) ResourceConditionalAccumulatorAttr {
	return func(m optionalAttr) {
		m["container"] = value
	}
}

// ResourceConditionalAccumulatorSharedName sets the optional shared_name attribute to value.
//
// value: If non-empty, this accumulator will be shared under the
// given name across multiple sessions.
// If not specified, defaults to ""
func ResourceConditionalAccumulatorSharedName(value string) ResourceConditionalAccumulatorAttr {
	return func(m optionalAttr) {
		m["shared_name"] = value
	}
}

// ResourceConditionalAccumulatorReductionType sets the optional reduction_type attribute to value.
// If not specified, defaults to "MEAN"
func ResourceConditionalAccumulatorReductionType(value string) ResourceConditionalAccumulatorAttr {
	return func(m optionalAttr) {
		m["reduction_type"] = value
	}
}

// A conditional accumulator for aggregating gradients.
//
// The accumulator accepts gradients marked with local_step greater than or
// equal to the most recent global_step known to the accumulator. The
// average can be extracted from the accumulator, provided sufficient
// gradients have been accumulated. Extracting the average automatically
// resets the aggregate to 0, and increments the global_step recorded by
// the accumulator.
// This is a resource version of ConditionalAccumulator that will work in TF2.0
// with tf.cond version 2.
//
// Arguments:
//	dtype: The type of the value being accumulated.
//	shape: The shape of the values, can be [], in which case shape is unknown.
//
// Returns The handle to the accumulator.
func ResourceConditionalAccumulator(scope *Scope, dtype tf.DataType, shape tf.Shape, optional ...ResourceConditionalAccumulatorAttr) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"dtype": dtype, "shape": shape}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ResourceConditionalAccumulator",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
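
// Example (hand-written, not generated): a minimal sketch of creating a
// resource accumulator and applying one gradient to it, assuming the Const
// helper defined elsewhere in this package and tf.ScalarShape from the
// parent package.
func exampleAccumulator(scope *Scope) *tf.Operation {
	handle := ResourceConditionalAccumulator(scope, tf.Float, tf.ScalarShape(),
		ResourceConditionalAccumulatorReductionType("MEAN"))
	localStep := Const(scope.SubScope("local_step"), int64(0))
	grad := Const(scope.SubScope("grad"), float32(0.5))
	// Accepted because local_step is not behind the accumulator's global_step.
	return ResourceAccumulatorApplyGradient(scope, handle, localStep, grad)
}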

// MultiDeviceIteratorFromStringHandleAttr is an optional argument to MultiDeviceIteratorFromStringHandle.
type MultiDeviceIteratorFromStringHandleAttr func(optionalAttr)

// MultiDeviceIteratorFromStringHandleOutputTypes sets the optional output_types attribute to value.
//
// value: The type list for the return values.
// If not specified, defaults to an empty list.
//
// REQUIRES: len(value) >= 0
func MultiDeviceIteratorFromStringHandleOutputTypes(value []tf.DataType) MultiDeviceIteratorFromStringHandleAttr {
	return func(m optionalAttr) {
		m["output_types"] = value
	}
}

// MultiDeviceIteratorFromStringHandleOutputShapes sets the optional output_shapes attribute to value.
//
// value: The list of shapes being produced.
// If not specified, defaults to an empty list.
//
// REQUIRES: len(value) >= 0
func MultiDeviceIteratorFromStringHandleOutputShapes(value []tf.Shape) MultiDeviceIteratorFromStringHandleAttr {
	return func(m optionalAttr) {
		m["output_shapes"] = value
	}
}

// Generates a MultiDeviceIterator resource from its provided string handle.
//
// Arguments:
//	string_handle: String representing the resource.
//
// Returns A MultiDeviceIterator resource.
func MultiDeviceIteratorFromStringHandle(scope *Scope, string_handle tf.Output, optional ...MultiDeviceIteratorFromStringHandleAttr) (multi_device_iterator tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "MultiDeviceIteratorFromStringHandle",
		Input: []tf.Input{
			string_handle,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Creates a TensorArray for storing the gradients of values in the given handle.
//
// If the given TensorArray gradient already exists, returns a reference to it.
//
// Locks the size of the original TensorArray by disabling its dynamic size flag.
//
// **A note about the input flow_in:**
//
// The handle flow_in forces the execution of the gradient lookup to occur
// only after certain other operations have occurred.  For example, when
// the forward TensorArray is dynamically sized, writes to this TensorArray
// may resize the object.  The gradient TensorArray is statically sized based
// on the size of the forward TensorArray when this operation executes.
// Furthermore, the size of the forward TensorArray is frozen by this call.
// As a result, the flow is used to ensure that the call to generate the gradient
// TensorArray only happens after all writes are executed.
//
// In the case of dynamically sized TensorArrays, gradient computation should
// only be performed on read operations that have themselves been chained via
// flow to occur only after all writes have executed. That way the final size
// of the forward TensorArray is known when this operation is called.
//
// **A note about the source attribute:**
//
// TensorArray gradient calls use an accumulator TensorArray object.  If
// multiple gradients are calculated and run in the same session, the multiple
// gradient nodes may accidentally flow through the same accumulator TensorArray.
// This double counts and generally breaks the TensorArray gradient flow.
//
// The solution is to identify which gradient call this particular
// TensorArray gradient is being called in.  This is performed by identifying
// a unique string (e.g. "gradients", "gradients_1", ...) from the input
// gradient Tensor's name.  This string is used as a suffix when creating
// the TensorArray gradient object here (the attribute `source`).
//
// The attribute `source` is added as a suffix to the forward TensorArray's
// name when performing the creation / lookup, so that each separate gradient
// calculation gets its own TensorArray accumulator.
//
// Arguments:
//	handle: The handle to the forward TensorArray.
//	flow_in: A float scalar that enforces proper chaining of operations.
//	source: The gradient source string, used to decide which gradient TensorArray
// to return.
func TensorArrayGradV3(scope *Scope, handle tf.Output, flow_in tf.Output, source string) (grad_handle tf.Output, flow_out tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"source": source}
	opspec := tf.OpSpec{
		Type: "TensorArrayGradV3",
		Input: []tf.Input{
			handle, flow_in,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1)
}

// Produces a string handle for the given MultiDeviceIterator.
//
// Arguments:
//	multi_device_iterator: A MultiDeviceIterator resource.
//
// Returns A string representing the resource.
func MultiDeviceIteratorToStringHandle(scope *Scope, multi_device_iterator tf.Output) (string_handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "MultiDeviceIteratorToStringHandle",
		Input: []tf.Input{
			multi_device_iterator,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// QuantizeAndDequantizeV4Attr is an optional argument to QuantizeAndDequantizeV4.
type QuantizeAndDequantizeV4Attr func(optionalAttr)

// QuantizeAndDequantizeV4SignedInput sets the optional signed_input attribute to value.
// If not specified, defaults to true
func QuantizeAndDequantizeV4SignedInput(value bool) QuantizeAndDequantizeV4Attr {
	return func(m optionalAttr) {
		m["signed_input"] = value
	}
}

// QuantizeAndDequantizeV4NumBits sets the optional num_bits attribute to value.
// If not specified, defaults to 8
func QuantizeAndDequantizeV4NumBits(value int64) QuantizeAndDequantizeV4Attr {
	return func(m optionalAttr) {
		m["num_bits"] = value
	}
}

// QuantizeAndDequantizeV4RangeGiven sets the optional range_given attribute to value.
// If not specified, defaults to false
func QuantizeAndDequantizeV4RangeGiven(value bool) QuantizeAndDequantizeV4Attr {
	return func(m optionalAttr) {
		m["range_given"] = value
	}
}

// QuantizeAndDequantizeV4RoundMode sets the optional round_mode attribute to value.
// If not specified, defaults to "HALF_TO_EVEN"
func QuantizeAndDequantizeV4RoundMode(value string) QuantizeAndDequantizeV4Attr {
	return func(m optionalAttr) {
		m["round_mode"] = value
	}
}

// QuantizeAndDequantizeV4NarrowRange sets the optional narrow_range attribute to value.
// If not specified, defaults to false
func QuantizeAndDequantizeV4NarrowRange(value bool) QuantizeAndDequantizeV4Attr {
	return func(m optionalAttr) {
		m["narrow_range"] = value
	}
}

// QuantizeAndDequantizeV4Axis sets the optional axis attribute to value.
// If not specified, defaults to -1
func QuantizeAndDequantizeV4Axis(value int64) QuantizeAndDequantizeV4Attr {
	return func(m optionalAttr) {
		m["axis"] = value
	}
}

// Quantizes then dequantizes a tensor.
//
// This is almost identical to QuantizeAndDequantizeV2, except that it returns a
// gradient of 1 for inputs that are within the quantization range, or 0 otherwise.
func QuantizeAndDequantizeV4(scope *Scope, input tf.Output, input_min tf.Output, input_max tf.Output, optional ...QuantizeAndDequantizeV4Attr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "QuantizeAndDequantizeV4",
		Input: []tf.Input{
			input, input_min, input_max,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
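
// Example (hand-written, not generated): a minimal sketch that fake-quantizes
// a tensor to 8 bits over an explicitly given [-1, 1] range, assuming the
// Const helper defined elsewhere in this package.
func exampleQuantizeAndDequantizeV4(scope *Scope) tf.Output {
	x := Const(scope.SubScope("x"), []float32{-1.5, 0.2, 0.9})
	lo := Const(scope.SubScope("min"), float32(-1))
	hi := Const(scope.SubScope("max"), float32(1))
	// Values outside [-1, 1] are clamped; values inside are rounded to the
	// nearest representable quantized level.
	return QuantizeAndDequantizeV4(scope, x, lo, hi,
		QuantizeAndDequantizeV4NumBits(8),
		QuantizeAndDequantizeV4RangeGiven(true))
}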

// Gets next element for the provided shard number.
//
// Arguments:
//	multi_device_iterator: A MultiDeviceIterator resource.
//	shard_num: Integer representing which shard to fetch data for.
//	incarnation_id: Which incarnation of the MultiDeviceIterator is running.
//	output_types: The type list for the return values.
//	output_shapes: The list of shapes being produced.
//
// Returns Result of the get_next on the dataset.
func MultiDeviceIteratorGetNextFromShard(scope *Scope, multi_device_iterator tf.Output, shard_num tf.Output, incarnation_id tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (components []tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
	opspec := tf.OpSpec{
		Type: "MultiDeviceIteratorGetNextFromShard",
		Input: []tf.Input{
			multi_device_iterator, shard_num, incarnation_id,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	if scope.Err() != nil {
		return
	}
	var idx int
	var err error
	if components, idx, err = makeOutputList(op, idx, "components"); err != nil {
		scope.UpdateErr("MultiDeviceIteratorGetNextFromShard", err)
		return
	}
	return components
}

// Creates a MultiDeviceIterator resource.
//
// Arguments:
//	devices: A list of devices the iterator works across.
//	shared_name: If non-empty, this resource will be shared under the given name
// across multiple sessions.
//	container: If non-empty, this resource is placed in the given container.
// Otherwise, a default container is used.
//	output_types: The type list for the return values.
//	output_shapes: The list of shapes being produced.
//
// Returns Handle to the resource created.
func MultiDeviceIterator(scope *Scope, devices []string, shared_name string, container string, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"devices": devices, "shared_name": shared_name, "container": container, "output_types": output_types, "output_shapes": output_shapes}
	opspec := tf.OpSpec{
		Type: "MultiDeviceIterator",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
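
// Example (hand-written, not generated): a minimal sketch that round-trips a
// MultiDeviceIterator through its string handle, assuming tf.ScalarShape
// from the parent package. The device name "/cpu:0" and shared name are
// illustrative placeholders.
func exampleMultiDeviceIteratorHandle(scope *Scope) tf.Output {
	types := []tf.DataType{tf.Int64}
	shapes := []tf.Shape{tf.ScalarShape()}
	mdi := MultiDeviceIterator(scope, []string{"/cpu:0"}, "shared_mdi", "", types, shapes)
	sh := MultiDeviceIteratorToStringHandle(scope, mdi)
	// Recover the resource from the serialized handle.
	return MultiDeviceIteratorFromStringHandle(scope, sh,
		MultiDeviceIteratorFromStringHandleOutputTypes(types),
		MultiDeviceIteratorFromStringHandleOutputShapes(shapes))
}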

// BoostedTreesCalculateBestFeatureSplitAttr is an optional argument to BoostedTreesCalculateBestFeatureSplit.
type BoostedTreesCalculateBestFeatureSplitAttr func(optionalAttr)

// BoostedTreesCalculateBestFeatureSplitSplitType sets the optional split_type attribute to value.
//
// value: A string indicating if this Op should perform inequality split or equality split.
// If not specified, defaults to "inequality"
func BoostedTreesCalculateBestFeatureSplitSplitType(value string) BoostedTreesCalculateBestFeatureSplitAttr {
	return func(m optionalAttr) {
		m["split_type"] = value
	}
}

// Calculates gains for each feature and returns the best possible split information for the feature.
//
// The split information is the best threshold (bucket id), gains and left/right node contributions per node for each feature.
//
// It is possible that not all nodes can be split on each feature. Hence, the list of possible nodes can differ between the features. Therefore, we return `node_ids_list` for each feature, containing the list of nodes that this feature can be used to split.
//
// In this manner, the output is the best split per feature and per node, so that it needs to be combined later to produce the best split for each node (among all possible features).
//
// The output shapes are compatible in a way that the first dimension of all tensors is the same and equal to the number of possible split nodes for each feature.
//
// Arguments:
//	node_id_range: A Rank 1 tensor (shape=[2]) to specify the range [first, last) of node ids to process within `stats_summary_list`. The nodes are iterated between the two nodes specified by the tensor, as in `for node_id in range(node_id_range[0], node_id_range[1])` (note that the last index node_id_range[1] is exclusive).
//	stats_summary: A Rank 4 tensor (shape=[max_splits, feature_dims, bucket, stats_dims]) for accumulated stats summary (gradient/hessian) per node, per dimension, per bucket for each feature.
// The first dimension of the tensor is the maximum number of splits, and thus not all elements of it will be used, but only the indexes specified by node_ids will be used.
//	l1: l1 regularization factor on leaf weights, per instance based.
//	l2: l2 regularization factor on leaf weights, per instance based.
//	tree_complexity: adjustment to the gain, per leaf based.
//	min_node_weight: minimum avg of hessians in a node required for the node to be considered for splitting.
//	logits_dimension: The dimension of logit, i.e., number of classes.
//
// Returns:
//	node_ids: A Rank 1 tensor indicating possible split node ids for each feature. The length of the list is num_features, but each tensor has different size as each feature provides different possible nodes. See above for details like shapes and sizes.
//	gains: A Rank 1 tensor indicating the best gains for each feature to split for certain nodes. See above for details like shapes and sizes.
//	feature_dimensions: A Rank 1 tensor indicating the best feature dimension for each feature to split for certain nodes if the feature is multi-dimensional. See above for details like shapes and sizes.
//	thresholds: A Rank 1 tensor indicating the bucket id to compare with (as a threshold) for split in each node. See above for details like shapes and sizes.
//	left_node_contribs: A Rank 2 tensor indicating the contribution of the left nodes when branching from parent nodes (given by the tensor element in the output node_ids_list) to the left direction by the given threshold for each feature. This value will be used to make the left node value by adding to the parent node value. Second dimension size is 1 for 1-dimensional logits, but would be larger for multi-class problems. See above for details like shapes and sizes.
//	right_node_contribs: A Rank 2 tensor, with the same shape/conditions as left_node_contribs_list, but just that the value is for the right node.
//	split_with_default_directions: A Rank 1 tensor indicating which direction to go if data is missing. See above for details like shapes and sizes.
// Inequality with default left returns 0, inequality with default right returns 1, equality with default right returns 2.
func BoostedTreesCalculateBestFeatureSplit(scope *Scope, node_id_range tf.Output, stats_summary tf.Output, l1 tf.Output, l2 tf.Output, tree_complexity tf.Output, min_node_weight tf.Output, logits_dimension int64, optional ...BoostedTreesCalculateBestFeatureSplitAttr) (node_ids tf.Output, gains tf.Output, feature_dimensions tf.Output, thresholds tf.Output, left_node_contribs tf.Output, right_node_contribs tf.Output, split_with_default_directions tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"logits_dimension": logits_dimension}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "BoostedTreesCalculateBestFeatureSplit",
		Input: []tf.Input{
			node_id_range, stats_summary, l1, l2, tree_complexity, min_node_weight,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2), op.Output(3), op.Output(4), op.Output(5), op.Output(6)
}

// Wraps the XLA DynamicUpdateSlice operator, documented at
//
//  https://www.tensorflow.org/performance/xla/operation_semantics#dynamicupdateslice
// .
//
// XlaDynamicUpdateSlice generates a result which is the value of the `input`
// operand, with a slice update overwritten at `indices`. The shape of `update`
// determines the shape of the sub-array of the result which is updated. `indices`
// must be a rank-1 tensor with dimension size equal to the rank of `input`.
//
// Handling of out-of-bounds slice indices is implementation-defined.
//
// Arguments:
//	input: A `Tensor` of type T.
//	update: A `Tensor` of type T. Same rank as `input`.
//	indices: A vector of indices into `input`. Must have length equal to the rank of
// `input`.
//
// Returns A `Tensor` of type T.
func XlaDynamicUpdateSlice(scope *Scope, input tf.Output, update tf.Output, indices tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "XlaDynamicUpdateSlice",
		Input: []tf.Input{
			input, update, indices,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
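
// Example (hand-written, not generated): a minimal sketch that overwrites a
// two-element slice of a 1-D tensor at offset 1, assuming the Const helper
// defined elsewhere in this package.
func exampleXlaDynamicUpdateSlice(scope *Scope) tf.Output {
	input := Const(scope.SubScope("input"), []float32{0, 0, 0, 0})
	update := Const(scope.SubScope("update"), []float32{1, 2})
	// One index per input dimension; input is rank 1.
	indices := Const(scope.SubScope("indices"), []int32{1})
	// Result: [0, 1, 2, 0].
	return XlaDynamicUpdateSlice(scope, input, update, indices)
}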

// ModelDatasetAttr is an optional argument to ModelDataset.
type ModelDatasetAttr func(optionalAttr)

// ModelDatasetAlgorithm sets the optional algorithm attribute to value.
// If not specified, defaults to 0
func ModelDatasetAlgorithm(value int64) ModelDatasetAttr {
	return func(m optionalAttr) {
		m["algorithm"] = value
	}
}

// ModelDatasetCpuBudget sets the optional cpu_budget attribute to value.
// If not specified, defaults to 0
func ModelDatasetCpuBudget(value int64) ModelDatasetAttr {
	return func(m optionalAttr) {
		m["cpu_budget"] = value
	}
}

// ModelDatasetRamBudget sets the optional ram_budget attribute to value.
// If not specified, defaults to 0
func ModelDatasetRamBudget(value int64) ModelDatasetAttr {
	return func(m optionalAttr) {
		m["ram_budget"] = value
	}
}

// Identity transformation that models performance.
//
// Arguments:
//	input_dataset: A variant tensor representing the input dataset.
//
//
func ModelDataset(scope *Scope, input_dataset tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...ModelDatasetAttr) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ModelDataset",
		Input: []tf.Input{
			input_dataset,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Returns a list of tensors with the same shapes and contents as the input
//
// tensors.
//
// This op can be used to override the gradient for complicated functions. For
// example, suppose y = f(x) and we wish to apply a custom function g for backprop
// such that dx = g(dy). In Python,
//
// ```python
// with tf.get_default_graph().gradient_override_map(
//     {'IdentityN': 'OverrideGradientWithG'}):
//   y, _ = identity_n([f(x), x])
//
// @tf.RegisterGradient('OverrideGradientWithG')
// def ApplyG(op, dy, _):
//   return [None, g(dy)]  # Do not backprop to f(x).
// ```
func IdentityN(scope *Scope, input []tf.Output) (output []tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "IdentityN",
		Input: []tf.Input{
			tf.OutputList(input),
		},
	}
	op := scope.AddOperation(opspec)
	if scope.Err() != nil {
		return
	}
	var idx int
	var err error
	if output, idx, err = makeOutputList(op, idx, "output"); err != nil {
		scope.UpdateErr("IdentityN", err)
		return
	}
	return output
}

// Returns true if and only if the given Optional variant has a value.
func OptionalHasValue(scope *Scope, optional tf.Output) (has_value tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "OptionalHasValue",
		Input: []tf.Input{
			optional,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Constructs an Optional variant from a tuple of tensors.
func OptionalFromValue(scope *Scope, components []tf.Output) (optional tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "OptionalFromValue",
		Input: []tf.Input{
			tf.OutputList(components),
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
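
// Example (hand-written, not generated): a minimal sketch that wraps a
// tensor in an Optional variant and tests it for a value, assuming the
// Const helper defined elsewhere in this package.
func exampleOptional(scope *Scope) tf.Output {
	v := Const(scope.SubScope("v"), int64(7))
	opt := OptionalFromValue(scope, []tf.Output{v})
	// A scalar bool output; true here, since opt was built from a value.
	return OptionalHasValue(scope, opt)
}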

// OptimizeDatasetV2Attr is an optional argument to OptimizeDatasetV2.
type OptimizeDatasetV2Attr func(optionalAttr)

// OptimizeDatasetV2OptimizationConfigs sets the optional optimization_configs attribute to value.
// If not specified, defaults to an empty list.
func OptimizeDatasetV2OptimizationConfigs(value []string) OptimizeDatasetV2Attr {
	return func(m optionalAttr) {
		m["optimization_configs"] = value
	}
}

// Creates a dataset by applying related optimizations to `input_dataset`.
//
// Arguments:
//	input_dataset: A variant tensor representing the input dataset.
//	optimizations_enabled: A `tf.string` vector `tf.Tensor` identifying user enabled optimizations.
//	optimizations_disabled: A `tf.string` vector `tf.Tensor` identifying user disabled optimizations.
//	optimizations_default: A `tf.string` vector `tf.Tensor` identifying optimizations enabled by default.
//
//
func OptimizeDatasetV2(scope *Scope, input_dataset tf.Output, optimizations_enabled tf.Output, optimizations_disabled tf.Output, optimizations_default tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...OptimizeDatasetV2Attr) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "OptimizeDatasetV2",
		Input: []tf.Input{
			input_dataset, optimizations_enabled, optimizations_disabled, optimizations_default,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// OptimizeDatasetAttr is an optional argument to OptimizeDataset.
type OptimizeDatasetAttr func(optionalAttr)

// OptimizeDatasetOptimizationConfigs sets the optional optimization_configs attribute to value.
// If not specified, defaults to an empty list.
func OptimizeDatasetOptimizationConfigs(value []string) OptimizeDatasetAttr {
	return func(m optionalAttr) {
		m["optimization_configs"] = value
	}
}

// Creates a dataset by applying optimizations to `input_dataset`.
//
// Arguments:
//	input_dataset: A variant tensor representing the input dataset.
//	optimizations: A `tf.string` vector `tf.Tensor` identifying optimizations to use.
//
//
func OptimizeDataset(scope *Scope, input_dataset tf.Output, optimizations tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...OptimizeDatasetAttr) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "OptimizeDataset",
		Input: []tf.Input{
			input_dataset, optimizations,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Converts the given `resource_handle` representing an iterator to a string.
//
// Arguments:
//	resource_handle: A handle to an iterator resource.
//
// Returns A string representation of the given handle.
func IteratorToStringHandle(scope *Scope, resource_handle tf.Output) (string_handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "IteratorToStringHandle",
		Input: []tf.Input{
			resource_handle,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Gets the next output from the given iterator.
//
// This operation is a synchronous version of IteratorGetNext. It should only be used
// in situations where the iterator does not block the calling thread, or where
// the calling thread is not a member of the thread pool used to execute parallel
// operations (e.g. in eager mode).
func IteratorGetNextSync(scope *Scope, iterator tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (components []tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
	opspec := tf.OpSpec{
		Type: "IteratorGetNextSync",
		Input: []tf.Input{
			iterator,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	if scope.Err() != nil {
		return
	}
	var idx int
	var err error
	if components, idx, err = makeOutputList(op, idx, "components"); err != nil {
		scope.UpdateErr("IteratorGetNextSync", err)
		return
	}
	return components
}

// Makes a new iterator from the given `dataset` and stores it in `iterator`.
//
// This operation may be executed multiple times. Each execution will reset the
// iterator in `iterator` to the first element of `dataset`.
//
// Returns the created operation.
func MakeIterator(scope *Scope, dataset tf.Output, iterator tf.Output) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "MakeIterator",
		Input: []tf.Input{
			dataset, iterator,
		},
	}
	return scope.AddOperation(opspec)
}

// Deletes the iterator resource referenced by the given handle.
//
// Arguments:
//	handle: A handle to the iterator to delete.
//	deleter: A variant deleter.
//
// Returns the created operation.
func DeleteIterator(scope *Scope, handle tf.Output, deleter tf.Output) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "DeleteIterator",
		Input: []tf.Input{
			handle, deleter,
		},
	}
	return scope.AddOperation(opspec)
}

// Splits a tensor into `num_split` tensors along one dimension.
//
// Arguments:
//	value: The tensor to split.
//	size_splits: list containing the sizes of each output tensor along the split
// dimension. Must sum to the dimension of value along `axis`.
// Can contain one -1 indicating that dimension is to be inferred.
//	axis: 0-D.  The dimension along which to split.  Must be in the range
// `[-rank(value), rank(value))`.
//
//
// Returns Tensors whose shape matches that of `value`
// except along `axis`, where their sizes are
// `size_splits[i]`.
func SplitV(scope *Scope, value tf.Output, size_splits tf.Output, axis tf.Output, num_split int64) (output []tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_split": num_split}
	opspec := tf.OpSpec{
		Type: "SplitV",
		Input: []tf.Input{
			value, size_splits, axis,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	if scope.Err() != nil {
		return
	}
	var idx int
	var err error
	if output, idx, err = makeOutputList(op, idx, "output"); err != nil {
		scope.UpdateErr("SplitV", err)
		return
	}
	return output
}
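
// Example (hand-written, not generated): a minimal sketch that splits a
// length-5 vector into pieces of sizes 2 and 3, letting -1 infer the second
// size, assuming the Const helper defined elsewhere in this package.
func exampleSplitV(scope *Scope) []tf.Output {
	value := Const(scope.SubScope("value"), []float32{1, 2, 3, 4, 5})
	sizes := Const(scope.SubScope("sizes"), []int64{2, -1})
	axis := Const(scope.SubScope("axis"), int32(0))
	// Returns two outputs: [1, 2] and [3, 4, 5].
	return SplitV(scope, value, sizes, axis, 2)
}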

// A container for an iterator resource.
//
// Returns:
//	handle: A handle to the iterator that can be passed to a "MakeIterator" or
// "IteratorGetNext" op. In contrast to Iterator, AnonymousIterator prevents
// resource sharing by name, and does not keep a reference to the resource
// container.
//	deleter: A variant deleter that should be passed into the op that deletes the iterator.
func AnonymousIteratorV2(scope *Scope, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output, deleter tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
	opspec := tf.OpSpec{
		Type: "AnonymousIteratorV2",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1)
}

// A container for an iterator resource.
//
// Returns A handle to the iterator that can be passed to a "MakeIterator" or
// "IteratorGetNext" op. In contrast to Iterator, AnonymousIterator prevents
// resource sharing by name, and does not keep a reference to the resource
// container.
func AnonymousIterator(scope *Scope, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
	opspec := tf.OpSpec{
		Type: "AnonymousIterator",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// A container for an iterator resource.
//
// Returns A handle to the iterator that can be passed to a "MakeIterator"
// or "IteratorGetNext" op.
func Iterator(scope *Scope, shared_name string, container string, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"shared_name": shared_name, "container": container, "output_types": output_types, "output_shapes": output_shapes}
	opspec := tf.OpSpec{
		Type: "Iterator",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Creates a dataset that emits the records from one or more TFRecord files.
//
// Arguments:
//	filenames: A scalar or vector containing the name(s) of the file(s) to be
// read.
//	compression_type: A scalar containing either (i) the empty string (no
// compression), (ii) "ZLIB", or (iii) "GZIP".
//	buffer_size: A scalar representing the number of bytes to buffer. A value of
// 0 means no buffering will be performed.
func TFRecordDataset(scope *Scope, filenames tf.Output, compression_type tf.Output, buffer_size tf.Output) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "TFRecordDataset",
		Input: []tf.Input{
			filenames, compression_type, buffer_size,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Creates a dataset that emits the records from one or more binary files.
//
// Arguments:
//	filenames: A scalar or a vector containing the name(s) of the file(s) to be
// read.
//	header_bytes: A scalar representing the number of bytes to skip at the
// beginning of a file.
//	record_bytes: A scalar representing the number of bytes in each record.
//	footer_bytes: A scalar representing the number of bytes to skip at the end
// of a file.
//	buffer_size: A scalar representing the number of bytes to buffer. Must be > 0.
func FixedLengthRecordDataset(scope *Scope, filenames tf.Output, header_bytes tf.Output, record_bytes tf.Output, footer_bytes tf.Output, buffer_size tf.Output) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "FixedLengthRecordDataset",
		Input: []tf.Input{
			filenames, header_bytes, record_bytes, footer_bytes, buffer_size,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Creates a dataset that emits the lines of one or more text files.
//
// Arguments:
//	filenames: A scalar or a vector containing the name(s) of the file(s) to be
// read.
//	compression_type: A scalar containing either (i) the empty string (no
// compression), (ii) "ZLIB", or (iii) "GZIP".
//	buffer_size: A scalar containing the number of bytes to buffer.
func TextLineDataset(scope *Scope, filenames tf.Output, compression_type tf.Output, buffer_size tf.Output) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "TextLineDataset",
		Input: []tf.Input{
			filenames, compression_type, buffer_size,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Deletes a multi device iterator resource.
//
// Arguments:
//	multi_device_iterator: A handle to the multi device iterator to delete.
//	iterators: A list of iterator handles (unused). This is added so that automatic control dependencies get added during function tracing that ensure this op runs after all the dependent iterators are deleted.
//	deleter: A variant deleter.
//
// Returns the created operation.
func DeleteMultiDeviceIterator(scope *Scope, multi_device_iterator tf.Output, iterators []tf.Output, deleter tf.Output) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "DeleteMultiDeviceIterator",
		Input: []tf.Input{
			multi_device_iterator, tf.OutputList(iterators), deleter,
		},
	}
	return scope.AddOperation(opspec)
}
8900
// Creates a dataset with a range of values. Corresponds to Python's xrange().
//
// Arguments:
//	start: corresponds to start in Python's xrange().
//	stop: corresponds to stop in Python's xrange().
//	step: corresponds to step in Python's xrange().
//
//
func RangeDataset(scope *Scope, start tf.Output, stop tf.Output, step tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
	opspec := tf.OpSpec{
		Type: "RangeDataset",
		Input: []tf.Input{
			start, stop, step,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

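// Editor's sketch (not generated code): a RangeDataset emitting 0, 2, 4, 6, 8,
// analogous to xrange(0, 10, 2). The output_types/output_shapes attrs describe
// the scalar int64 elements the dataset produces.
func exampleRangeDataset() tf.Output {
	s := NewScope()
	start := Const(s, int64(0))
	stop := Const(s, int64(10))
	step := Const(s, int64(2))
	return RangeDataset(s, start, stop, step,
		[]tf.DataType{tf.Int64}, []tf.Shape{tf.ScalarShape()})
}
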
// Creates a dataset that batches and pads `batch_size` elements from the input.
//
// Arguments:
//
//	batch_size: A scalar representing the number of elements to accumulate in a
// batch.
//	padded_shapes: A list of int64 tensors representing the desired padded shapes
// of the corresponding output components. These shapes may be partially
// specified, using `-1` to indicate that a particular dimension should be
// padded to the maximum size of all batch elements.
//	padding_values: A list of scalars containing the padding value to use for
// each of the outputs.
//
func PaddedBatchDataset(scope *Scope, input_dataset tf.Output, batch_size tf.Output, padded_shapes []tf.Output, padding_values []tf.Output, output_shapes []tf.Shape) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"output_shapes": output_shapes}
	opspec := tf.OpSpec{
		Type: "PaddedBatchDataset",
		Input: []tf.Input{
			input_dataset, batch_size, tf.OutputList(padded_shapes), tf.OutputList(padding_values),
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// BatchDatasetV2Attr is an optional argument to BatchDatasetV2.
type BatchDatasetV2Attr func(optionalAttr)

// BatchDatasetV2ParallelCopy sets the optional parallel_copy attribute to value.
// If not specified, defaults to false
func BatchDatasetV2ParallelCopy(value bool) BatchDatasetV2Attr {
	return func(m optionalAttr) {
		m["parallel_copy"] = value
	}
}

// Creates a dataset that batches `batch_size` elements from `input_dataset`.
//
// Arguments:
//
//	batch_size: A scalar representing the number of elements to accumulate in a batch.
//	drop_remainder: A scalar representing whether the last batch should be dropped in case its size
// is smaller than desired.
//
//
func BatchDatasetV2(scope *Scope, input_dataset tf.Output, batch_size tf.Output, drop_remainder tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...BatchDatasetV2Attr) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "BatchDatasetV2",
		Input: []tf.Input{
			input_dataset, batch_size, drop_remainder,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

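// Editor's sketch (not generated code): batching a dataset, showing the
// functional-options pattern this package uses for optional attributes such
// as parallel_copy. The scope, dataset handle, and type/shape slices are
// assumed to come from the caller.
func exampleBatchDatasetV2(s *Scope, ds tf.Output, types []tf.DataType, shapes []tf.Shape) tf.Output {
	batchSize := Const(s, int64(32))
	dropRemainder := Const(s, true) // drop a final short batch
	return BatchDatasetV2(s, ds, batchSize, dropRemainder, types, shapes,
		BatchDatasetV2ParallelCopy(true))
}
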
// ShuffleDatasetAttr is an optional argument to ShuffleDataset.
type ShuffleDatasetAttr func(optionalAttr)

// ShuffleDatasetReshuffleEachIteration sets the optional reshuffle_each_iteration attribute to value.
//
// value: If true, each iterator over this dataset will be given
// a different pseudorandomly generated seed, based on a sequence seeded by the
// `seed` and `seed2` inputs. If false, each iterator will be given the same
// seed, and repeated iteration over this dataset will yield the exact same
// sequence of results.
// If not specified, defaults to true
func ShuffleDatasetReshuffleEachIteration(value bool) ShuffleDatasetAttr {
	return func(m optionalAttr) {
		m["reshuffle_each_iteration"] = value
	}
}

// Creates a dataset that shuffles elements from `input_dataset` pseudorandomly.
//
// Arguments:
//
//	buffer_size: The number of output elements to buffer in an iterator over
// this dataset. Compare with the `min_after_dequeue` attr when creating a
// `RandomShuffleQueue`.
//	seed: A scalar seed for the random number generator. If either `seed` or
// `seed2` is set to be non-zero, the random number generator is seeded
// by the given seed.  Otherwise, a random seed is used.
//	seed2: A second scalar seed to avoid seed collision.
//
//
func ShuffleDataset(scope *Scope, input_dataset tf.Output, buffer_size tf.Output, seed tf.Output, seed2 tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...ShuffleDatasetAttr) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ShuffleDataset",
		Input: []tf.Input{
			input_dataset, buffer_size, seed, seed2,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

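// Editor's sketch (not generated code): shuffling with fixed seeds so every
// iterator yields the same order; ReshuffleEachIteration(false) pins the
// sequence across repeated iterations. Seed values are arbitrary.
func exampleShuffleDataset(s *Scope, ds tf.Output, types []tf.DataType, shapes []tf.Shape) tf.Output {
	bufferSize := Const(s, int64(1024)) // elements held for mixing
	seed := Const(s, int64(42))
	seed2 := Const(s, int64(7))
	return ShuffleDataset(s, ds, bufferSize, seed, seed2, types, shapes,
		ShuffleDatasetReshuffleEachIteration(false))
}
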
// Creates a dataset containing those elements of the first component of `input_dataset` whose last component is true.
func FilterByLastComponentDataset(scope *Scope, input_dataset tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
	opspec := tf.OpSpec{
		Type: "FilterByLastComponentDataset",
		Input: []tf.Input{
			input_dataset,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Makes a static dimension into an XLA bounded dynamic dimension.
//
// The current static dimension size will become the bound and the second
// operand becomes the dynamic size of the dimension.
func XlaSetDynamicDimensionSize(scope *Scope, input tf.Output, dim_index tf.Output, size tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "XlaSetDynamicDimensionSize",
		Input: []tf.Input{
			input, dim_index, size,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// PrefetchDatasetAttr is an optional argument to PrefetchDataset.
type PrefetchDatasetAttr func(optionalAttr)

// PrefetchDatasetSlackPeriod sets the optional slack_period attribute to value.
// If not specified, defaults to 0
func PrefetchDatasetSlackPeriod(value int64) PrefetchDatasetAttr {
	return func(m optionalAttr) {
		m["slack_period"] = value
	}
}

// PrefetchDatasetLegacyAutotune sets the optional legacy_autotune attribute to value.
// If not specified, defaults to true
func PrefetchDatasetLegacyAutotune(value bool) PrefetchDatasetAttr {
	return func(m optionalAttr) {
		m["legacy_autotune"] = value
	}
}

// PrefetchDatasetBufferSizeMin sets the optional buffer_size_min attribute to value.
// If not specified, defaults to 0
func PrefetchDatasetBufferSizeMin(value int64) PrefetchDatasetAttr {
	return func(m optionalAttr) {
		m["buffer_size_min"] = value
	}
}

// Creates a dataset that asynchronously prefetches elements from `input_dataset`.
//
// Arguments:
//
//	buffer_size: The maximum number of elements to buffer in an iterator over
// this dataset.
//
//
func PrefetchDataset(scope *Scope, input_dataset tf.Output, buffer_size tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...PrefetchDatasetAttr) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "PrefetchDataset",
		Input: []tf.Input{
			input_dataset, buffer_size,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

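// Editor's sketch (not generated code): a typical input-pipeline tail that
// prefetches one element ahead of the consumer. The optional
// buffer_size_min attribute is set purely to illustrate the setter pattern.
func examplePrefetchDataset(s *Scope, ds tf.Output, types []tf.DataType, shapes []tf.Shape) tf.Output {
	bufferSize := Const(s, int64(1)) // stay one element ahead
	return PrefetchDataset(s, ds, bufferSize, types, shapes,
		PrefetchDatasetBufferSizeMin(1))
}
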
// Forwards the input to the output.
//
// This operator represents the loop termination condition used by the
// "pivot" switches of a loop.
//
// Arguments:
//	input: A boolean scalar, representing the branch predicate of the Switch op.
//
// Returns The same tensor as `input`.
func LoopCond(scope *Scope, input tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "LoopCond",
		Input: []tf.Input{
			input,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Creates a dataset that skips `count` elements from the `input_dataset`.
//
// Arguments:
//
//	count: A scalar representing the number of elements from the `input_dataset`
// that should be skipped.  If count is -1, skips everything.
//
//
func SkipDataset(scope *Scope, input_dataset tf.Output, count tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
	opspec := tf.OpSpec{
		Type: "SkipDataset",
		Input: []tf.Input{
			input_dataset, count,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Creates a dataset that emits the outputs of `input_dataset` `count` times.
//
// Arguments:
//
//	count: A scalar representing the number of times that `input_dataset` should
// be repeated. A value of `-1` indicates that it should be repeated infinitely.
//
//
func RepeatDataset(scope *Scope, input_dataset tf.Output, count tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
	opspec := tf.OpSpec{
		Type: "RepeatDataset",
		Input: []tf.Input{
			input_dataset, count,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

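// Editor's sketch (not generated code): chaining the two ops above, skipping
// the first 5 elements and then repeating the remainder indefinitely
// (count = -1).
func exampleSkipThenRepeat(s *Scope, ds tf.Output, types []tf.DataType, shapes []tf.Shape) tf.Output {
	skipped := SkipDataset(s, ds, Const(s, int64(5)), types, shapes)
	return RepeatDataset(s, skipped, Const(s, int64(-1)), types, shapes)
}
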
// UnpackAttr is an optional argument to Unpack.
type UnpackAttr func(optionalAttr)

// UnpackAxis sets the optional axis attribute to value.
//
// value: Dimension along which to unpack.  Negative values wrap around, so the
// valid range is `[-R, R)`.
// If not specified, defaults to 0
func UnpackAxis(value int64) UnpackAttr {
	return func(m optionalAttr) {
		m["axis"] = value
	}
}

// Unpacks a given dimension of a rank-`R` tensor into `num` rank-`(R-1)` tensors.
//
// Unpacks `num` tensors from `value` by chipping it along the `axis` dimension.
// For example, given a tensor of shape `(A, B, C, D)`;
//
// If `axis == 0` then the i'th tensor in `output` is the slice `value[i, :, :, :]`
//   and each tensor in `output` will have shape `(B, C, D)`. (Note that the
//   dimension unpacked along is gone, unlike `split`).
//
// If `axis == 1` then the i'th tensor in `output` is the slice `value[:, i, :, :]`
//   and each tensor in `output` will have shape `(A, C, D)`.
// Etc.
//
// This is the opposite of `pack`.
//
// Arguments:
//	value: 1-D or higher, with `axis` dimension size equal to `num`.
//
//
// Returns The list of tensors unpacked from `value`.
func Unpack(scope *Scope, value tf.Output, num int64, optional ...UnpackAttr) (output []tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num": num}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "Unpack",
		Input: []tf.Input{
			value,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	if scope.Err() != nil {
		return
	}
	var idx int
	var err error
	if output, idx, err = makeOutputList(op, idx, "output"); err != nil {
		scope.UpdateErr("Unpack", err)
		return
	}
	return output
}

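// Editor's sketch (not generated code): unpacking a 2x3 matrix along axis 0
// yields two tensors of shape (3); along axis 1 it would yield three tensors
// of shape (2).
func exampleUnpack() []tf.Output {
	s := NewScope()
	m := Const(s, [][]float32{{1, 2, 3}, {4, 5, 6}})
	return Unpack(s, m, 2, UnpackAxis(0))
}
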
// Creates a dataset that concatenates `input_dataset` with `another_dataset`.
func ConcatenateDataset(scope *Scope, input_dataset tf.Output, another_dataset tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
	opspec := tf.OpSpec{
		Type: "ConcatenateDataset",
		Input: []tf.Input{
			input_dataset, another_dataset,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// A placeholder op for a value that will be fed into the computation.
//
// DEPRECATED at GraphDef version 23: Placeholder now behaves the same as PlaceholderV2.
//
// N.B. This operation will fail with an error if it is executed. It is
// intended as a way to represent a value that will always be fed, and to
// provide attrs that enable the fed value to be checked at runtime.
//
// Arguments:
//	dtype: The type of elements in the tensor.
//	shape: The shape of the tensor. The shape can be any partially-specified
// shape.  To be unconstrained, pass in a shape with unknown rank.
//
// Returns A placeholder tensor that must be replaced using the feed mechanism.
func PlaceholderV2(scope *Scope, dtype tf.DataType, shape tf.Shape) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"dtype": dtype, "shape": shape}
	opspec := tf.OpSpec{
		Type: "PlaceholderV2",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

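// Editor's sketch (not generated code): a float placeholder with a partially
// specified shape (unknown batch dimension). The value must be supplied via
// the session feed mechanism at Run time; tf.MakeShape is assumed from the
// tensorflow/go package, and -1 marks an unknown dimension.
func examplePlaceholderV2(s *Scope) tf.Output {
	return PlaceholderV2(s, tf.Float, tf.MakeShape(-1, 28, 28))
}
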
// RandomShuffleQueueV2Attr is an optional argument to RandomShuffleQueueV2.
type RandomShuffleQueueV2Attr func(optionalAttr)

// RandomShuffleQueueV2Shapes sets the optional shapes attribute to value.
//
// value: The shape of each component in a value. The length of this attr must
// be either 0 or the same as the length of component_types. If the length of
// this attr is 0, the shapes of queue elements are not constrained, and
// only one element may be dequeued at a time.
// If not specified, defaults to <>
//
// REQUIRES: len(value) >= 0
func RandomShuffleQueueV2Shapes(value []tf.Shape) RandomShuffleQueueV2Attr {
	return func(m optionalAttr) {
		m["shapes"] = value
	}
}

// RandomShuffleQueueV2Capacity sets the optional capacity attribute to value.
//
// value: The upper bound on the number of elements in this queue.
// Negative numbers mean no limit.
// If not specified, defaults to -1
func RandomShuffleQueueV2Capacity(value int64) RandomShuffleQueueV2Attr {
	return func(m optionalAttr) {
		m["capacity"] = value
	}
}

// RandomShuffleQueueV2MinAfterDequeue sets the optional min_after_dequeue attribute to value.
//
// value: Dequeue will block unless there would be this
// many elements after the dequeue or the queue is closed. This
// ensures a minimum level of mixing of elements.
// If not specified, defaults to 0
func RandomShuffleQueueV2MinAfterDequeue(value int64) RandomShuffleQueueV2Attr {
	return func(m optionalAttr) {
		m["min_after_dequeue"] = value
	}
}

// RandomShuffleQueueV2Seed sets the optional seed attribute to value.
//
// value: If either seed or seed2 is set to be non-zero, the random number
// generator is seeded by the given seed.  Otherwise, a random seed is used.
// If not specified, defaults to 0
func RandomShuffleQueueV2Seed(value int64) RandomShuffleQueueV2Attr {
	return func(m optionalAttr) {
		m["seed"] = value
	}
}

// RandomShuffleQueueV2Seed2 sets the optional seed2 attribute to value.
//
// value: A second seed to avoid seed collision.
// If not specified, defaults to 0
func RandomShuffleQueueV2Seed2(value int64) RandomShuffleQueueV2Attr {
	return func(m optionalAttr) {
		m["seed2"] = value
	}
}

// RandomShuffleQueueV2Container sets the optional container attribute to value.
//
// value: If non-empty, this queue is placed in the given container.
// Otherwise, a default container is used.
// If not specified, defaults to ""
func RandomShuffleQueueV2Container(value string) RandomShuffleQueueV2Attr {
	return func(m optionalAttr) {
		m["container"] = value
	}
}

// RandomShuffleQueueV2SharedName sets the optional shared_name attribute to value.
//
// value: If non-empty, this queue will be shared under the given name
// across multiple sessions.
// If not specified, defaults to ""
func RandomShuffleQueueV2SharedName(value string) RandomShuffleQueueV2Attr {
	return func(m optionalAttr) {
		m["shared_name"] = value
	}
}

// A queue that randomizes the order of elements.
//
// Arguments:
//	component_types: The type of each component in a value.
//
// Returns The handle to the queue.
func RandomShuffleQueueV2(scope *Scope, component_types []tf.DataType, optional ...RandomShuffleQueueV2Attr) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"component_types": component_types}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "RandomShuffleQueueV2",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

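// Editor's sketch (not generated code): a shuffling queue of scalar floats
// with capacity 1000 that keeps at least 100 elements mixed after each
// dequeue. The shared name is hypothetical.
func exampleRandomShuffleQueueV2(s *Scope) tf.Output {
	return RandomShuffleQueueV2(s, []tf.DataType{tf.Float},
		RandomShuffleQueueV2Capacity(1000),
		RandomShuffleQueueV2MinAfterDequeue(100),
		RandomShuffleQueueV2Shapes([]tf.Shape{tf.ScalarShape()}),
		RandomShuffleQueueV2SharedName("shuffle_queue")) // hypothetical name
}
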
// Creates a dataset that splits a SparseTensor into elements row-wise.
func SparseTensorSliceDataset(scope *Scope, indices tf.Output, values tf.Output, dense_shape tf.Output) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "SparseTensorSliceDataset",
		Input: []tf.Input{
			indices, values, dense_shape,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Creates a dataset that emits `components` as a tuple of tensors once.
func TensorDataset(scope *Scope, components []tf.Output, output_shapes []tf.Shape) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"output_shapes": output_shapes}
	opspec := tf.OpSpec{
		Type: "TensorDataset",
		Input: []tf.Input{
			tf.OutputList(components),
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

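// Editor's sketch (not generated code): a single-element dataset holding one
// int64 vector. output_shapes must describe each component of `components`;
// tf.MakeShape is assumed from the tensorflow/go package.
func exampleTensorDataset() tf.Output {
	s := NewScope()
	v := Const(s, []int64{1, 2, 3})
	return TensorDataset(s, []tf.Output{v}, []tf.Shape{tf.MakeShape(3)})
}
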
// QueueCloseV2Attr is an optional argument to QueueCloseV2.
type QueueCloseV2Attr func(optionalAttr)

// QueueCloseV2CancelPendingEnqueues sets the optional cancel_pending_enqueues attribute to value.
//
// value: If true, all pending enqueue requests that are
// blocked on the given queue will be canceled.
// If not specified, defaults to false
func QueueCloseV2CancelPendingEnqueues(value bool) QueueCloseV2Attr {
	return func(m optionalAttr) {
		m["cancel_pending_enqueues"] = value
	}
}

// Closes the given queue.
//
// This operation signals that no more elements will be enqueued in the
// given queue. Subsequent Enqueue(Many) operations will fail.
// Subsequent Dequeue(Many) operations will continue to succeed if
// sufficient elements remain in the queue. Subsequent Dequeue(Many)
// operations that would block will fail immediately.
//
// Arguments:
//	handle: The handle to a queue.
//
// Returns the created operation.
func QueueCloseV2(scope *Scope, handle tf.Output, optional ...QueueCloseV2Attr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "QueueCloseV2",
		Input: []tf.Input{
			handle,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}

// DebugIdentityV2Attr is an optional argument to DebugIdentityV2.
type DebugIdentityV2Attr func(optionalAttr)

// DebugIdentityV2TfdbgContextId sets the optional tfdbg_context_id attribute to value.
//
// value: A tfdbg-generated ID for the context that the op belongs to,
//   e.g., a concrete compiled tf.function.
// If not specified, defaults to ""
func DebugIdentityV2TfdbgContextId(value string) DebugIdentityV2Attr {
	return func(m optionalAttr) {
		m["tfdbg_context_id"] = value
	}
}

// DebugIdentityV2OpName sets the optional op_name attribute to value.
//
// value: Optional. Name of the op that the debug op is concerned with.
//   Used only for single-tensor trace.
// If not specified, defaults to ""
func DebugIdentityV2OpName(value string) DebugIdentityV2Attr {
	return func(m optionalAttr) {
		m["op_name"] = value
	}
}

// DebugIdentityV2OutputSlot sets the optional output_slot attribute to value.
//
// value: Optional. Output slot index of the tensor that the debug op
//   is concerned with. Used only for single-tensor trace.
// If not specified, defaults to -1
func DebugIdentityV2OutputSlot(value int64) DebugIdentityV2Attr {
	return func(m optionalAttr) {
		m["output_slot"] = value
	}
}

// DebugIdentityV2TensorDebugMode sets the optional tensor_debug_mode attribute to value.
//
// value: TensorDebugMode enum value. See debug_event.proto for details.
// If not specified, defaults to -1
func DebugIdentityV2TensorDebugMode(value int64) DebugIdentityV2Attr {
	return func(m optionalAttr) {
		m["tensor_debug_mode"] = value
	}
}

// DebugIdentityV2DebugUrls sets the optional debug_urls attribute to value.
//
// value: List of URLs to debug targets, e.g., file:///foo/tfdbg_dump.
// If not specified, defaults to <>
func DebugIdentityV2DebugUrls(value []string) DebugIdentityV2Attr {
	return func(m optionalAttr) {
		m["debug_urls"] = value
	}
}

// DebugIdentityV2CircularBufferSize sets the optional circular_buffer_size attribute to value.
// If not specified, defaults to 1000
func DebugIdentityV2CircularBufferSize(value int64) DebugIdentityV2Attr {
	return func(m optionalAttr) {
		m["circular_buffer_size"] = value
	}
}

// DebugIdentityV2TfdbgRunId sets the optional tfdbg_run_id attribute to value.
// If not specified, defaults to ""
func DebugIdentityV2TfdbgRunId(value string) DebugIdentityV2Attr {
	return func(m optionalAttr) {
		m["tfdbg_run_id"] = value
	}
}

// Debug Identity V2 Op.
//
// Provides an identity mapping from input to output, while writing the content of
// the input tensor by calling DebugEventsWriter.
//
// The semantics of the input tensor depends on tensor_debug_mode. In typical
// usage, the input tensor comes directly from the user computation only when
// graph_debug_mode is FULL_TENSOR (see protobuf/debug_event.proto for a
// list of all the possible values of graph_debug_mode). For the other debug modes,
// the input tensor should be produced by an additional op or subgraph that
// computes summary information about one or more tensors.
//
// Arguments:
//	input: Input tensor, non-Reference type
func DebugIdentityV2(scope *Scope, input tf.Output, optional ...DebugIdentityV2Attr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "DebugIdentityV2",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// DebugNanCountAttr is an optional argument to DebugNanCount.
type DebugNanCountAttr func(optionalAttr)

// DebugNanCountDeviceName sets the optional device_name attribute to value.
// If not specified, defaults to ""
func DebugNanCountDeviceName(value string) DebugNanCountAttr {
	return func(m optionalAttr) {
		m["device_name"] = value
	}
}

// DebugNanCountTensorName sets the optional tensor_name attribute to value.
//
// value: Name of the input tensor.
// If not specified, defaults to ""
func DebugNanCountTensorName(value string) DebugNanCountAttr {
	return func(m optionalAttr) {
		m["tensor_name"] = value
	}
}

// DebugNanCountDebugUrls sets the optional debug_urls attribute to value.
//
// value: List of URLs to debug targets, e.g.,
//   file:///foo/tfdbg_dump, grpc://localhost:11011.
// If not specified, defaults to <>
func DebugNanCountDebugUrls(value []string) DebugNanCountAttr {
	return func(m optionalAttr) {
		m["debug_urls"] = value
	}
}

// DebugNanCountGatedGrpc sets the optional gated_grpc attribute to value.
//
// value:  Whether this op will be gated. If any of the debug_urls of this
//   debug node is of the grpc:// scheme, when the value of this attribute is set
//   to True, the data will not actually be sent via the grpc stream unless this
//   debug op has been enabled at the debug_url. If all of the debug_urls of this
//   debug node are of the grpc:// scheme and the debug op is enabled at none of
//   them, the output will be an empty Tensor.
// If not specified, defaults to false
func DebugNanCountGatedGrpc(value bool) DebugNanCountAttr {
	return func(m optionalAttr) {
		m["gated_grpc"] = value
	}
}

// Debug NaN Value Counter Op.
//
// Counts the number of NaNs in the input tensor, for debugging.
//
// Arguments:
//	input: Input tensor, non-Reference type.
func DebugNanCount(scope *Scope, input tf.Output, optional ...DebugNanCountAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "DebugNanCount",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// DebugIdentityAttr is an optional argument to DebugIdentity.
type DebugIdentityAttr func(optionalAttr)

// DebugIdentityDeviceName sets the optional device_name attribute to value.
//
// value: Name of the device on which the tensor resides.
// If not specified, defaults to ""
func DebugIdentityDeviceName(value string) DebugIdentityAttr {
	return func(m optionalAttr) {
		m["device_name"] = value
	}
}

// DebugIdentityTensorName sets the optional tensor_name attribute to value.
//
// value: Name of the input tensor.
// If not specified, defaults to ""
func DebugIdentityTensorName(value string) DebugIdentityAttr {
	return func(m optionalAttr) {
		m["tensor_name"] = value
	}
}

// DebugIdentityDebugUrls sets the optional debug_urls attribute to value.
//
// value: List of URLs to debug targets, e.g.,
//   file:///foo/tfdbg_dump, grpc://localhost:11011
// If not specified, defaults to <>
func DebugIdentityDebugUrls(value []string) DebugIdentityAttr {
	return func(m optionalAttr) {
		m["debug_urls"] = value
	}
}

// DebugIdentityGatedGrpc sets the optional gated_grpc attribute to value.
//
// value: Whether this op will be gated. If any of the debug_urls of this
//   debug node is of the grpc:// scheme, when the value of this attribute is set
//   to True, the data will not actually be sent via the grpc stream unless this
//   debug op has been enabled at the debug_url. If all of the debug_urls of this
//   debug node are of the grpc:// scheme and the debug op is enabled at none of
//   them, the output will be an empty Tensor.
// If not specified, defaults to false
func DebugIdentityGatedGrpc(value bool) DebugIdentityAttr {
	return func(m optionalAttr) {
		m["gated_grpc"] = value
	}
}

// Provides an identity mapping of the non-Ref type input tensor for debugging.
//
// Arguments:
//	input: Input tensor, non-Reference type
func DebugIdentity(scope *Scope, input tf.Output, optional ...DebugIdentityAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "DebugIdentity",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Aggregates the summary of accumulated stats for the batch.
//
// The summary stats contain gradients and hessians accumulated for each node, bucket and dimension id.
//
// Arguments:
//	node_ids: int32; Rank 1 Tensor containing node ids for each example, shape [batch_size].
//	gradients: float32; Rank 2 Tensor (shape=[batch_size, logits_dimension]) with gradients for each example.
//	hessians: float32; Rank 2 Tensor (shape=[batch_size, hessian_dimension]) with hessians for each example.
//	feature_indices: int32; Rank 2 indices of feature sparse Tensors (shape=[number of sparse entries, 2]).
// Number of sparse entries across all instances from the batch. The first value is
// the index of the instance, the second is the dimension of the feature. The second axis
// can only have 2 values, i.e., the dense version of the input Tensor can only be a matrix.
//	feature_values: int32; Rank 1 values of feature sparse Tensors (shape=[number of sparse entries]).
// Number of sparse entries across all instances from the batch. The first value is
// the index of the instance, the second is the dimension of the feature.
//	feature_shape: int32; Rank 1 dense shape of feature sparse Tensors (shape=[2]).
// The first axis can only have 2 values, [batch_size, feature_dimension].
//	max_splits: int; the maximum number of splits possible in the whole tree.
//	num_buckets: int; equals the maximum possible value of the bucketized feature + 1.
//
// Returns:
//	stats_summary_indices: int32; Rank 2 indices of summary sparse Tensors (shape=[number of non zero statistics, 4])
// The second axis can only be 4 including node id, feature dimension, bucket id, and statistics_dimension.
// statistics_dimension = logits_dimension + hessian_dimension.
//	stats_summary_values: output Rank 1 Tensor (shape=[number of non zero statistics])
//	stats_summary_shape: output Rank 1 Tensor (shape=[4])
// The tensor has the following 4 values: [max_splits, feature_dimension, num_buckets, statistics_dimension],
// where statistics_dimension = gradient_dimension + hessian_dimension. gradient_dimension
// is the same as label_dimension, i.e., the output space. hessian_dimension can be the same
// as logits dimension when diagonal hessian is used, or label_dimension^2 when full
// hessian is used.
func BoostedTreesSparseAggregateStats(scope *Scope, node_ids tf.Output, gradients tf.Output, hessians tf.Output, feature_indices tf.Output, feature_values tf.Output, feature_shape tf.Output, max_splits int64, num_buckets int64) (stats_summary_indices tf.Output, stats_summary_values tf.Output, stats_summary_shape tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"max_splits": max_splits, "num_buckets": num_buckets}
	opspec := tf.OpSpec{
		Type: "BoostedTreesSparseAggregateStats",
		Input: []tf.Input{
			node_ids, gradients, hessians, feature_indices, feature_values, feature_shape,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}

// DecodeProtoV2Attr is an optional argument to DecodeProtoV2.
type DecodeProtoV2Attr func(optionalAttr)

// DecodeProtoV2DescriptorSource sets the optional descriptor_source attribute to value.
//
// value: Either the special value `local://` or a path to a file containing
// a serialized `FileDescriptorSet`.
// If not specified, defaults to "local://"
func DecodeProtoV2DescriptorSource(value string) DecodeProtoV2Attr {
	return func(m optionalAttr) {
		m["descriptor_source"] = value
	}
}

// DecodeProtoV2MessageFormat sets the optional message_format attribute to value.
//
// value: Either `binary` or `text`.
// If not specified, defaults to "binary"
func DecodeProtoV2MessageFormat(value string) DecodeProtoV2Attr {
	return func(m optionalAttr) {
		m["message_format"] = value
	}
}

// DecodeProtoV2Sanitize sets the optional sanitize attribute to value.
//
// value: Whether to sanitize the result or not.
// If not specified, defaults to false
func DecodeProtoV2Sanitize(value bool) DecodeProtoV2Attr {
	return func(m optionalAttr) {
		m["sanitize"] = value
	}
}

// The op extracts fields from a serialized protocol buffers message into tensors.
//
// The `decode_proto` op extracts fields from a serialized protocol buffers
// message into tensors.  The fields in `field_names` are decoded and converted
// to the corresponding `output_types` if possible.
//
// A `message_type` name must be provided to give context for the field names.
// The actual message descriptor can be looked up either in the linked-in
// descriptor pool or a filename provided by the caller using the
// `descriptor_source` attribute.
//
// Each output tensor is a dense tensor. This means that it is padded to hold
// the largest number of repeated elements seen in the input minibatch. (The
// shape is also padded by one to prevent zero-sized dimensions). The actual
// repeat counts for each example in the minibatch can be found in the `sizes`
// output. In many cases the output of `decode_proto` is fed immediately into
// tf.squeeze if missing values are not a concern. When using tf.squeeze, always
// pass the squeeze dimension explicitly to avoid surprises.
//
// For the most part, the mapping between Proto field types and TensorFlow dtypes
// is straightforward. However, there are a few special cases:
//
// - A proto field that contains a submessage or group can only be converted
// to `DT_STRING` (the serialized submessage). This is to reduce the complexity
// of the API. The resulting string can be used as input to another instance of
// the decode_proto op.
//
// - TensorFlow lacks support for unsigned integers. The ops represent uint64
// types as a `DT_INT64` with the same twos-complement bit pattern (the obvious
// way). Unsigned int32 values can be represented exactly by specifying type
// `DT_INT64`, or using twos-complement if the caller specifies `DT_INT32` in
// the `output_types` attribute.
//
// Both binary and text proto serializations are supported, and can be
// chosen using the `format` attribute.
//
// The `descriptor_source` attribute selects the source of protocol
// descriptors to consult when looking up `message_type`. This may be:
//
// - An empty string or "local://", in which case protocol descriptors are
// created for C++ (not Python) proto definitions linked to the binary.
//
// - A file, in which case protocol descriptors are created from the file,
// which is expected to contain a `FileDescriptorSet` serialized as a string.
// NOTE: You can build a `descriptor_source` file using the `--descriptor_set_out`
// and `--include_imports` options to the protocol compiler `protoc`.
//
// - A "bytes://<bytes>", in which case protocol descriptors are created from `<bytes>`,
// which is expected to be a `FileDescriptorSet` serialized as a string.
//
// Arguments:
//	bytes: Tensor of serialized protos with shape `batch_shape`.
//	message_type: Name of the proto message type to decode.
//	field_names: List of strings containing proto field names. An extension field can be decoded
// by using its full name, e.g. EXT_PACKAGE.EXT_FIELD_NAME.
//	output_types: List of TF types to use for the respective field in field_names.
//
// Returns:
//	sizes: Tensor of int32 with shape `[batch_shape, len(field_names)]`.
// Each entry is the number of values found for the corresponding field.
// Optional fields may have 0 or 1 values.
//	values: List of tensors containing values for the corresponding field.
// `values[i]` has datatype `output_types[i]`
// and shape `[batch_shape, max(sizes[...,i])]`.
func DecodeProtoV2(scope *Scope, bytes tf.Output, message_type string, field_names []string, output_types []tf.DataType, optional ...DecodeProtoV2Attr) (sizes tf.Output, values []tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"message_type": message_type, "field_names": field_names, "output_types": output_types}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "DecodeProtoV2",
		Input: []tf.Input{
			bytes,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	if scope.Err() != nil {
		return
	}
	var idx int
	var err error
	sizes = op.Output(idx)
	if values, idx, err = makeOutputList(op, idx, "values"); err != nil {
		scope.UpdateErr("DecodeProtoV2", err)
		return
	}
	return sizes, values
}

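// Editor's sketch (not generated code): decoding two fields from serialized
// protos of a hypothetical message type "example.Person" using the linked-in
// descriptor pool. The message type and field names are illustrative only.
func exampleDecodeProtoV2(s *Scope, serialized tf.Output) (sizes tf.Output, values []tf.Output) {
	return DecodeProtoV2(s, serialized, "example.Person",
		[]string{"name", "age"},
		[]tf.DataType{tf.String, tf.Int64},
		DecodeProtoV2MessageFormat("binary"))
}
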
// Output the logits for the given input data
//
// Arguments:
//	tree_handle: Handle to the tree resource.
//	dense_features: Rank 2 dense features tensor.
//	logits_dimension: Scalar, dimension of the logits.
//
// Returns The logits predictions from the tree for each instance in the batch.
func TensorForestTreePredict(scope *Scope, tree_handle tf.Output, dense_features tf.Output, logits_dimension int64) (logits tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"logits_dimension": logits_dimension}
	opspec := tf.OpSpec{
		Type: "TensorForestTreePredict",
		Input: []tf.Input{
			tree_handle, dense_features,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// EncodeProtoAttr is an optional argument to EncodeProto.
type EncodeProtoAttr func(optionalAttr)

// EncodeProtoDescriptorSource sets the optional descriptor_source attribute to value.
// If not specified, defaults to "local://"
func EncodeProtoDescriptorSource(value string) EncodeProtoAttr {
	return func(m optionalAttr) {
		m["descriptor_source"] = value
	}
}

// The op serializes protobuf messages provided in the input tensors.
//
// The types of the tensors in `values` must match the schema for the fields
// specified in `field_names`. All the tensors in `values` must have a common
// shape prefix, *batch_shape*.
//
// The `sizes` tensor specifies repeat counts for each field.  The repeat count
// (last dimension) of each tensor in `values` must be greater than or equal
// to the corresponding repeat count in `sizes`.
//
// A `message_type` name must be provided to give context for the field names.
// The actual message descriptor can be looked up either in the linked-in
// descriptor pool or a filename provided by the caller using the
// `descriptor_source` attribute.
//
// For the most part, the mapping between Proto field types and TensorFlow dtypes
// is straightforward. However, there are a few special cases:
//
// - A proto field that contains a submessage or group can only be converted
// to `DT_STRING` (the serialized submessage). This is to reduce the complexity
// of the API. The resulting string can be used as input to another instance of
// the decode_proto op.
//
// - TensorFlow lacks support for unsigned integers. The ops represent uint64
// types as a `DT_INT64` with the same twos-complement bit pattern (the obvious
// way). Unsigned int32 values can be represented exactly by specifying type
// `DT_INT64`, or using twos-complement if the caller specifies `DT_INT32` in
// the `output_types` attribute.
//
// The `descriptor_source` attribute selects the source of protocol
// descriptors to consult when looking up `message_type`. This may be:
//
// - An empty string or "local://", in which case protocol descriptors are
// created for C++ (not Python) proto definitions linked to the binary.
//
// - A file, in which case protocol descriptors are created from the file,
// which is expected to contain a `FileDescriptorSet` serialized as a string.
// NOTE: You can build a `descriptor_source` file using the `--descriptor_set_out`
// and `--include_imports` options to the protocol compiler `protoc`.
//
// - A "bytes://<bytes>", in which case protocol descriptors are created from `<bytes>`,
// which is expected to be a `FileDescriptorSet` serialized as a string.
//
// Arguments:
//	sizes: Tensor of int32 with shape `[batch_shape, len(field_names)]`.
//	values: List of tensors containing values for the corresponding field.
//	field_names: List of strings containing proto field names.
//	message_type: Name of the proto message type to encode.
//
// Returns Tensor of serialized protos with shape `batch_shape`.
func EncodeProto(scope *Scope, sizes tf.Output, values []tf.Output, field_names []string, message_type string, optional ...EncodeProtoAttr) (bytes tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"field_names": field_names, "message_type": message_type}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "EncodeProto",
		Input: []tf.Input{
			sizes, tf.OutputList(values),
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

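// Editor's sketch (not generated code): re-serializing the sizes/values pair
// produced by the DecodeProtoV2 sketch above; the message type and field
// names are the same hypothetical ones and must match the decode step.
func exampleEncodeProto(s *Scope, sizes tf.Output, values []tf.Output) tf.Output {
	return EncodeProto(s, sizes, values, []string{"name", "age"}, "example.Person")
}
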
// Registers a dataset with the tf.data service.
func RegisterDataset(scope *Scope, dataset tf.Output, address tf.Output, protocol tf.Output, external_state_policy int64) (dataset_id tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"external_state_policy": external_state_policy}
	opspec := tf.OpSpec{
		Type: "RegisterDataset",
		Input: []tf.Input{
			dataset, address, protocol,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

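// Editor's sketch (not generated code): registering a dataset with a tf.data
// service dispatcher. The address is hypothetical, and 0 is assumed here to
// be a valid external_state_policy value; consult the op registry for the
// actual enum values.
func exampleRegisterDataset(s *Scope, ds tf.Output) tf.Output {
	address := Const(s, "localhost:5050") // hypothetical dispatcher address
	protocol := Const(s, "grpc")
	return RegisterDataset(s, ds, address, protocol, 0)
}
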
// DataServiceDatasetAttr is an optional argument to DataServiceDataset.
type DataServiceDatasetAttr func(optionalAttr)

// DataServiceDatasetTaskRefreshIntervalHintMs sets the optional task_refresh_interval_hint_ms attribute to value.
// If not specified, defaults to -1
func DataServiceDatasetTaskRefreshIntervalHintMs(value int64) DataServiceDatasetAttr {
	return func(m optionalAttr) {
		m["task_refresh_interval_hint_ms"] = value
	}
}

// DataServiceDatasetDataTransferProtocol sets the optional data_transfer_protocol attribute to value.
// If not specified, defaults to ""
func DataServiceDatasetDataTransferProtocol(value string) DataServiceDatasetAttr {
	return func(m optionalAttr) {
		m["data_transfer_protocol"] = value
	}
}

// Creates a dataset that reads data from the tf.data service.
func DataServiceDataset(scope *Scope, dataset_id tf.Output, processing_mode tf.Output, address tf.Output, protocol tf.Output, job_name tf.Output, max_outstanding_requests tf.Output, iteration_counter tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...DataServiceDatasetAttr) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "DataServiceDataset",
		Input: []tf.Input{
			dataset_id, processing_mode, address, protocol, job_name, max_outstanding_requests, iteration_counter,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Creates a dataset that contains the unique elements of `input_dataset`.
func UniqueDataset(scope *Scope, input_dataset tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
	opspec := tf.OpSpec{
		Type: "UniqueDataset",
		Input: []tf.Input{
			input_dataset,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// A dataset that splits the elements of its input into multiple elements.
func ExperimentalUnbatchDataset(scope *Scope, input_dataset tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
	opspec := tf.OpSpec{
		Type: "ExperimentalUnbatchDataset",
		Input: []tf.Input{
			input_dataset,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// A dataset that splits the elements of its input into multiple elements.
func UnbatchDataset(scope *Scope, input_dataset tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
	opspec := tf.OpSpec{
		Type: "UnbatchDataset",
		Input: []tf.Input{
			input_dataset,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Creates a dataset that uses a custom thread pool to compute `input_dataset`.
//
// Arguments:
//
//	thread_pool: A resource produced by the ThreadPoolHandle op.
//
//
func ExperimentalThreadPoolDataset(scope *Scope, input_dataset tf.Output, thread_pool tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
	opspec := tf.OpSpec{
		Type: "ExperimentalThreadPoolDataset",
		Input: []tf.Input{
			input_dataset, thread_pool,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Gets the next output from the given iterator as an Optional variant.
func IteratorGetNextAsOptional(scope *Scope, iterator tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (optional tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
	opspec := tf.OpSpec{
		Type: "IteratorGetNextAsOptional",
		Input: []tf.Input{
			iterator,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Produces a summary of any statistics recorded by the given statistics manager.
func StatsAggregatorSummary(scope *Scope, iterator tf.Output) (summary tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "StatsAggregatorSummary",
		Input: []tf.Input{
			iterator,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// ExperimentalStatsAggregatorHandleAttr is an optional argument to ExperimentalStatsAggregatorHandle.
type ExperimentalStatsAggregatorHandleAttr func(optionalAttr)

// ExperimentalStatsAggregatorHandleContainer sets the optional container attribute to value.
// If not specified, defaults to ""
func ExperimentalStatsAggregatorHandleContainer(value string) ExperimentalStatsAggregatorHandleAttr {
	return func(m optionalAttr) {
		m["container"] = value
	}
}

// ExperimentalStatsAggregatorHandleSharedName sets the optional shared_name attribute to value.
// If not specified, defaults to ""
func ExperimentalStatsAggregatorHandleSharedName(value string) ExperimentalStatsAggregatorHandleAttr {
	return func(m optionalAttr) {
		m["shared_name"] = value
	}
}

// Creates a statistics manager resource.
func ExperimentalStatsAggregatorHandle(scope *Scope, optional ...ExperimentalStatsAggregatorHandleAttr) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ExperimentalStatsAggregatorHandle",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// StatsAggregatorHandleAttr is an optional argument to StatsAggregatorHandle.
type StatsAggregatorHandleAttr func(optionalAttr)

// StatsAggregatorHandleContainer sets the optional container attribute to value.
// If not specified, defaults to ""
func StatsAggregatorHandleContainer(value string) StatsAggregatorHandleAttr {
	return func(m optionalAttr) {
		m["container"] = value
	}
}

// StatsAggregatorHandleSharedName sets the optional shared_name attribute to value.
// If not specified, defaults to ""
func StatsAggregatorHandleSharedName(value string) StatsAggregatorHandleAttr {
	return func(m optionalAttr) {
		m["shared_name"] = value
	}
}

// Creates a statistics manager resource.
func StatsAggregatorHandle(scope *Scope, optional ...StatsAggregatorHandleAttr) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "StatsAggregatorHandle",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

10256
10257// Creates a dataset that executes a SQL query and emits rows of the result set.
10258//
10259// Arguments:
10260//	driver_name: The database type. Currently, the only supported type is 'sqlite'.
10261//	data_source_name: A connection string to connect to the database.
10262//	query: A SQL query to execute.
10263//
10264//
10265func ExperimentalSqlDataset(scope *Scope, driver_name tf.Output, data_source_name tf.Output, query tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
10266	if scope.Err() != nil {
10267		return
10268	}
10269	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
10270	opspec := tf.OpSpec{
10271		Type: "ExperimentalSqlDataset",
10272		Input: []tf.Input{
10273			driver_name, data_source_name, query,
10274		},
10275		Attrs: attrs,
10276	}
10277	op := scope.AddOperation(opspec)
10278	return op.Output(0)
10279}
10280
// Generate the bucket boundaries for each feature based on accumulated summaries.
//
// An op that returns a list of float tensors for a quantile stream resource. Each
// tensor is Rank 1 containing bucket boundaries for a single feature.
//
// Arguments:
//	quantile_stream_resource_handle: resource handle referring to a QuantileStreamResource.
//	num_features: inferred int; number of features to get bucket boundaries for.
//
// Returns float; List of Rank 1 Tensors each containing the bucket boundaries for a feature.
func BoostedTreesQuantileStreamResourceGetBucketBoundaries(scope *Scope, quantile_stream_resource_handle tf.Output, num_features int64) (bucket_boundaries []tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_features": num_features}
	opspec := tf.OpSpec{
		Type: "BoostedTreesQuantileStreamResourceGetBucketBoundaries",
		Input: []tf.Input{
			quantile_stream_resource_handle,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	if scope.Err() != nil {
		return
	}
	var idx int
	var err error
	if bucket_boundaries, idx, err = makeOutputList(op, idx, "bucket_boundaries"); err != nil {
		scope.UpdateErr("BoostedTreesQuantileStreamResourceGetBucketBoundaries", err)
		return
	}
	return bucket_boundaries
}

// Creates a dataset that passes a sliding window over `input_dataset`.
//
// Arguments:
//
//	window_size: A scalar representing the number of elements in the
// sliding window.
//	window_shift: A scalar representing the steps moving the sliding window
// forward in one iteration. It must be positive.
//	window_stride: A scalar representing the stride of the input elements of the sliding window.
// It must be positive.
//
//
func ExperimentalSlidingWindowDataset(scope *Scope, input_dataset tf.Output, window_size tf.Output, window_shift tf.Output, window_stride tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
	opspec := tf.OpSpec{
		Type: "ExperimentalSlidingWindowDataset",
		Input: []tf.Input{
			input_dataset, window_size, window_shift, window_stride,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Deprecated. Use TensorArraySizeV3
//
// DEPRECATED at GraphDef version 26: Use TensorArraySizeV3
func TensorArraySizeV2(scope *Scope, handle tf.Output, flow_in tf.Output) (size tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "TensorArraySizeV2",
		Input: []tf.Input{
			handle, flow_in,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Creates a dataset that changes the batch size.
//
// Creates a dataset that rebatches elements from `input_dataset` into new batch
// sizes.
//
// Arguments:
//	input_dataset: A variant tensor representing the input dataset.
//	batch_sizes: A vector of integers representing the size of batches to produce. These values
// are cycled through in order.
//
//
//
func RebatchDatasetV2(scope *Scope, input_dataset tf.Output, batch_sizes tf.Output, drop_remainder tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
	opspec := tf.OpSpec{
		Type: "RebatchDatasetV2",
		Input: []tf.Input{
			input_dataset, batch_sizes, drop_remainder,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

10389// RebatchDatasetAttr is an optional argument to RebatchDataset.
10390type RebatchDatasetAttr func(optionalAttr)
10391
10392// RebatchDatasetUseFallback sets the optional use_fallback attribute to value.
10393// If not specified, defaults to true
10394func RebatchDatasetUseFallback(value bool) RebatchDatasetAttr {
10395	return func(m optionalAttr) {
10396		m["use_fallback"] = value
10397	}
10398}
10399
10400// Creates a dataset that changes the batch size.
10401//
10402// Creates a dataset that changes the batch size of the dataset to the current
10403// batch size divided by `num_replicas`.
10404//
10405// Arguments:
10406//	input_dataset: A variant tensor representing the input dataset.
10407//	num_replicas: A scalar representing the number of replicas to distribute this batch across. As
10408// a result of this transformation, the current batch size is divided by this
10409// parameter.
10410//
10411//
10412func RebatchDataset(scope *Scope, input_dataset tf.Output, num_replicas tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...RebatchDatasetAttr) (handle tf.Output) {
10413	if scope.Err() != nil {
10414		return
10415	}
10416	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
10417	for _, a := range optional {
10418		a(attrs)
10419	}
10420	opspec := tf.OpSpec{
10421		Type: "RebatchDataset",
10422		Input: []tf.Input{
10423			input_dataset, num_replicas,
10424		},
10425		Attrs: attrs,
10426	}
10427	op := scope.AddOperation(opspec)
10428	return op.Output(0)
10429}
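
// Example (editorial sketch, not part of the generated file): how the
// functional-options pattern above might be used from client code. The scope
// `s` and the upstream `dataset` handle are assumed to exist; the replica
// count and the element type/shape lists are illustrative only.
//
// ```
// numReplicas := op.Const(s, int64(4))
// rebatched := op.RebatchDataset(s, dataset, numReplicas,
// 	[]tf.DataType{tf.Int64},      // output_types
// 	[]tf.Shape{tf.ScalarShape()}, // output_shapes
// 	op.RebatchDatasetUseFallback(false),
// )
// _ = rebatched
// ```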
10430
10431// Creates a dataset that uses a custom thread pool to compute `input_dataset`.
10432//
10433// Arguments:
10434//
10435//	num_threads: Identifies the number of threads to use for the private threadpool.
10436//
10437//
10438func ExperimentalPrivateThreadPoolDataset(scope *Scope, input_dataset tf.Output, num_threads tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
10439	if scope.Err() != nil {
10440		return
10441	}
10442	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
10443	opspec := tf.OpSpec{
10444		Type: "ExperimentalPrivateThreadPoolDataset",
10445		Input: []tf.Input{
10446			input_dataset, num_threads,
10447		},
10448		Attrs: attrs,
10449	}
10450	op := scope.AddOperation(opspec)
10451	return op.Output(0)
10452}
10453
10454// Creates a dataset that uses a custom thread pool to compute `input_dataset`.
10455//
10456// Arguments:
10457//
10458//	num_threads: Identifies the number of threads to use for the private threadpool.
10459//
10460//
10461func PrivateThreadPoolDataset(scope *Scope, input_dataset tf.Output, num_threads tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
10462	if scope.Err() != nil {
10463		return
10464	}
10465	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
10466	opspec := tf.OpSpec{
10467		Type: "PrivateThreadPoolDataset",
10468		Input: []tf.Input{
10469			input_dataset, num_threads,
10470		},
10471		Attrs: attrs,
10472	}
10473	op := scope.AddOperation(opspec)
10474	return op.Output(0)
10475}
10476
10477// ExperimentalParseExampleDatasetAttr is an optional argument to ExperimentalParseExampleDataset.
10478type ExperimentalParseExampleDatasetAttr func(optionalAttr)
10479
10480// ExperimentalParseExampleDatasetSloppy sets the optional sloppy attribute to value.
10481// If not specified, defaults to false
10482func ExperimentalParseExampleDatasetSloppy(value bool) ExperimentalParseExampleDatasetAttr {
10483	return func(m optionalAttr) {
10484		m["sloppy"] = value
10485	}
10486}
10487
10488// Transforms `input_dataset` containing `Example` protos as vectors of DT_STRING into a dataset of `Tensor` or `SparseTensor` objects representing the parsed features.
10489//
10490// Arguments:
10491//
10492//
10493//	dense_defaults: A dict mapping string keys to `Tensor`s.
10494// The keys of the dict must match the dense_keys of the feature.
10495//	sparse_keys: A list of string keys in the examples features.
10496// The results for these keys will be returned as `SparseTensor` objects.
10497//	dense_keys: A list of Ndense string Tensors (scalars).
10498// The keys expected in the Examples features associated with dense values.
10499//	sparse_types: A list of `DTypes` of the same length as `sparse_keys`.
10500// Only `tf.float32` (`FloatList`), `tf.int64` (`Int64List`),
10501// and `tf.string` (`BytesList`) are supported.
10502//	dense_shapes: List of tuples with the same length as `dense_keys`.
10503// The shape of the data for each dense feature referenced by `dense_keys`.
10504// Required for any input tensors identified by `dense_keys`.  Must be
10505// either fully defined, or may contain an unknown first dimension.
10506// An unknown first dimension means the feature is treated as having
10507// a variable number of blocks, and the output shape along this dimension
10508// is considered unknown at graph build time.  Padding is applied for
10509// minibatch elements smaller than the maximum number of blocks for the
10510// given feature along this dimension.
10511//	output_types: The type list for the return values.
10512//	output_shapes: The list of shapes being produced.
10513func ExperimentalParseExampleDataset(scope *Scope, input_dataset tf.Output, num_parallel_calls tf.Output, dense_defaults []tf.Output, sparse_keys []string, dense_keys []string, sparse_types []tf.DataType, dense_shapes []tf.Shape, output_types []tf.DataType, output_shapes []tf.Shape, optional ...ExperimentalParseExampleDatasetAttr) (handle tf.Output) {
10514	if scope.Err() != nil {
10515		return
10516	}
10517	attrs := map[string]interface{}{"sparse_keys": sparse_keys, "dense_keys": dense_keys, "sparse_types": sparse_types, "dense_shapes": dense_shapes, "output_types": output_types, "output_shapes": output_shapes}
10518	for _, a := range optional {
10519		a(attrs)
10520	}
10521	opspec := tf.OpSpec{
10522		Type: "ExperimentalParseExampleDataset",
10523		Input: []tf.Input{
10524			input_dataset, num_parallel_calls, tf.OutputList(dense_defaults),
10525		},
10526		Attrs: attrs,
10527	}
10528	op := scope.AddOperation(opspec)
10529	return op.Output(0)
10530}
10531
10532// Returns a batched matrix tensor with new batched diagonal values.
10533//
10534// Given `input` and `diagonal`, this operation returns a tensor with the
10535// same shape and values as `input`, except for the main diagonal of the
10536// innermost matrices.  These will be overwritten by the values in `diagonal`.
10537//
10538// The output is computed as follows:
10539//
10540// Assume `input` has `k+1` dimensions `[I, J, K, ..., M, N]` and `diagonal` has
10541// `k` dimensions `[I, J, K, ..., min(M, N)]`.  Then the output is a
10542// tensor of rank `k+1` with dimensions `[I, J, K, ..., M, N]` where:
10543//
10544//   * `output[i, j, k, ..., m, n] = diagonal[i, j, k, ..., n]` for `m == n`.
10545//   * `output[i, j, k, ..., m, n] = input[i, j, k, ..., m, n]` for `m != n`.
10546//
10547// Arguments:
10548//	input: Rank `k+1`, where `k >= 1`.
10549//	diagonal: Rank `k`, where `k >= 1`.
10550//
10551// Returns Rank `k+1`, with `output.shape = input.shape`.
10552func MatrixSetDiag(scope *Scope, input tf.Output, diagonal tf.Output) (output tf.Output) {
10553	if scope.Err() != nil {
10554		return
10555	}
10556	opspec := tf.OpSpec{
10557		Type: "MatrixSetDiag",
10558		Input: []tf.Input{
10559			input, diagonal,
10560		},
10561	}
10562	op := scope.AddOperation(opspec)
10563	return op.Output(0)
10564}
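
// Example (editorial sketch, not part of the generated file): overwriting the
// main diagonal of a single 2x2 matrix. The scope `s` and the usual
// graph/session plumbing are assumed; the values are illustrative.
//
// ```
// input := op.Const(s, [][]float32{{1, 2}, {3, 4}})
// diag := op.Const(s, []float32{9, 8})
// out := op.MatrixSetDiag(s, input, diag)
// // After running the graph, out evaluates to [[9, 2], [3, 8]].
// ```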
10565
10566// ParseExampleDatasetV2Attr is an optional argument to ParseExampleDatasetV2.
10567type ParseExampleDatasetV2Attr func(optionalAttr)
10568
10569// ParseExampleDatasetV2Deterministic sets the optional deterministic attribute to value.
10570//
10571// value: A string indicating the op-level determinism to use. Deterministic controls
10572// whether the dataset is allowed to return elements out of order if the next
10573// element to be returned isn't available, but a later element is. Options are
10574// "true", "false", and "default". "default" indicates that determinism should be
10575// decided by the `experimental_deterministic` parameter of `tf.data.Options`.
10576// If not specified, defaults to "default"
10577func ParseExampleDatasetV2Deterministic(value string) ParseExampleDatasetV2Attr {
10578	return func(m optionalAttr) {
10579		m["deterministic"] = value
10580	}
10581}
10582
10583// ParseExampleDatasetV2RaggedKeys sets the optional ragged_keys attribute to value.
10584// If not specified, defaults to <>
10585//
10586// REQUIRES: len(value) >= 0
10587func ParseExampleDatasetV2RaggedKeys(value []string) ParseExampleDatasetV2Attr {
10588	return func(m optionalAttr) {
10589		m["ragged_keys"] = value
10590	}
10591}
10592
10593// ParseExampleDatasetV2RaggedValueTypes sets the optional ragged_value_types attribute to value.
10594// If not specified, defaults to <>
10595//
10596// REQUIRES: len(value) >= 0
10597func ParseExampleDatasetV2RaggedValueTypes(value []tf.DataType) ParseExampleDatasetV2Attr {
10598	return func(m optionalAttr) {
10599		m["ragged_value_types"] = value
10600	}
10601}
10602
10603// ParseExampleDatasetV2RaggedSplitTypes sets the optional ragged_split_types attribute to value.
10604// If not specified, defaults to <>
10605//
10606// REQUIRES: len(value) >= 0
10607func ParseExampleDatasetV2RaggedSplitTypes(value []tf.DataType) ParseExampleDatasetV2Attr {
10608	return func(m optionalAttr) {
10609		m["ragged_split_types"] = value
10610	}
10611}
10612
10613// Transforms `input_dataset` containing `Example` protos as vectors of DT_STRING into a dataset of `Tensor` or `SparseTensor` objects representing the parsed features.
10614//
10615// Arguments:
10616//
10617//
10618//	dense_defaults: A dict mapping string keys to `Tensor`s.
10619// The keys of the dict must match the dense_keys of the feature.
10620//	sparse_keys: A list of string keys in the examples features.
10621// The results for these keys will be returned as `SparseTensor` objects.
10622//	dense_keys: A list of Ndense string Tensors (scalars).
10623// The keys expected in the Examples features associated with dense values.
10624//	sparse_types: A list of `DTypes` of the same length as `sparse_keys`.
10625// Only `tf.float32` (`FloatList`), `tf.int64` (`Int64List`),
10626// and `tf.string` (`BytesList`) are supported.
10627//	dense_shapes: List of tuples with the same length as `dense_keys`.
10628// The shape of the data for each dense feature referenced by `dense_keys`.
10629// Required for any input tensors identified by `dense_keys`.  Must be
10630// either fully defined, or may contain an unknown first dimension.
10631// An unknown first dimension means the feature is treated as having
10632// a variable number of blocks, and the output shape along this dimension
10633// is considered unknown at graph build time.  Padding is applied for
10634// minibatch elements smaller than the maximum number of blocks for the
10635// given feature along this dimension.
10636//	output_types: The type list for the return values.
10637//	output_shapes: The list of shapes being produced.
10638func ParseExampleDatasetV2(scope *Scope, input_dataset tf.Output, num_parallel_calls tf.Output, dense_defaults []tf.Output, sparse_keys []string, dense_keys []string, sparse_types []tf.DataType, dense_shapes []tf.Shape, output_types []tf.DataType, output_shapes []tf.Shape, optional ...ParseExampleDatasetV2Attr) (handle tf.Output) {
10639	if scope.Err() != nil {
10640		return
10641	}
10642	attrs := map[string]interface{}{"sparse_keys": sparse_keys, "dense_keys": dense_keys, "sparse_types": sparse_types, "dense_shapes": dense_shapes, "output_types": output_types, "output_shapes": output_shapes}
10643	for _, a := range optional {
10644		a(attrs)
10645	}
10646	opspec := tf.OpSpec{
10647		Type: "ParseExampleDatasetV2",
10648		Input: []tf.Input{
10649			input_dataset, num_parallel_calls, tf.OutputList(dense_defaults),
10650		},
10651		Attrs: attrs,
10652	}
10653	op := scope.AddOperation(opspec)
10654	return op.Output(0)
10655}
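
// Example (editorial sketch, not part of the generated file): wiring
// ParseExampleDatasetV2 with a single dense feature and one optional
// attribute. The scope `s`, the `dataset` handle, and `numParallelCalls` are
// assumed to come from earlier ops; the key, shapes, and types are
// illustrative only.
//
// ```
// defaultAge := op.Const(s, []int64{0})
// parsed := op.ParseExampleDatasetV2(s, dataset, numParallelCalls,
// 	[]tf.Output{defaultAge},     // dense_defaults
// 	nil,                         // sparse_keys
// 	[]string{"age"},             // dense_keys
// 	nil,                         // sparse_types
// 	[]tf.Shape{tf.MakeShape(1)}, // dense_shapes
// 	[]tf.DataType{tf.Int64},     // output_types
// 	[]tf.Shape{tf.MakeShape(1)}, // output_shapes
// 	op.ParseExampleDatasetV2Deterministic("true"),
// )
// _ = parsed
// ```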
10656
10657// GenerateVocabRemappingAttr is an optional argument to GenerateVocabRemapping.
10658type GenerateVocabRemappingAttr func(optionalAttr)
10659
10660// GenerateVocabRemappingOldVocabSize sets the optional old_vocab_size attribute to value.
10661//
10662// value: Number of entries in the old vocab file to consider.  If -1,
10663// use the entire old vocabulary.
10664// If not specified, defaults to -1
10665//
10666// REQUIRES: value >= -1
10667func GenerateVocabRemappingOldVocabSize(value int64) GenerateVocabRemappingAttr {
10668	return func(m optionalAttr) {
10669		m["old_vocab_size"] = value
10670	}
10671}
10672
10673// Given a path to new and old vocabulary files, returns a remapping Tensor of
10674//
10675// length `num_new_vocab`, where `remapping[i]` contains the row number in the old
10676// vocabulary that corresponds to row `i` in the new vocabulary (starting at line
10677// `new_vocab_offset` and up to `num_new_vocab` entries), or `-1` if entry `i`
10678// in the new vocabulary is not in the old vocabulary.  The old vocabulary is
10679// constrained to the first `old_vocab_size` entries if `old_vocab_size` is not the
10680// default value of -1.
10681//
10682// `new_vocab_offset` enables
10683// use in the partitioned variable case, and should generally be set through
10684// examining partitioning info.  Each vocabulary file should be a text file,
10685// with each line containing a single entry of the vocabulary.
10686//
10687// For example, with `new_vocab_file` a text file containing each of the following
10688// elements on a single line: `[f0, f1, f2, f3]`, `old_vocab_file` containing
10689// `[f1, f0, f3]`, `num_new_vocab = 3`, and `new_vocab_offset = 1`, the returned remapping would be
10690// `[0, -1, 2]`.
10691//
10692// The op also returns a count of how many entries in the new vocabulary
10693// were present in the old vocabulary, which is used to calculate the number of
10694// values to initialize in a weight matrix remapping.
10695//
10696// This functionality can be used to remap both row vocabularies (typically,
10697// features) and column vocabularies (typically, classes) from TensorFlow
10698// checkpoints.  Note that the partitioning logic relies on contiguous vocabularies
10699// corresponding to div-partitioned variables.  Moreover, the underlying remapping
10700// uses an IndexTable (as opposed to an inexact CuckooTable), so client code should
10701// use the corresponding index_table_from_file() as the FeatureColumn framework
10702// does (as opposed to tf.feature_to_id(), which uses a CuckooTable).
10703//
10704// Arguments:
10705//	new_vocab_file: Path to the new vocab file.
10706//	old_vocab_file: Path to the old vocab file.
10707//	new_vocab_offset: How many entries into the new vocab file to start reading.
10708//	num_new_vocab: Number of entries in the new vocab file to remap.
10709//
10710// Returns:
10711//	remapping: A Tensor of length num_new_vocab where the element at index i
10712// is equal to the old ID that maps to the new ID i.  This element is -1 for any
10713// new ID that is not found in the old vocabulary.
10714//	num_present: Number of new vocab entries found in old vocab.
10715func GenerateVocabRemapping(scope *Scope, new_vocab_file tf.Output, old_vocab_file tf.Output, new_vocab_offset int64, num_new_vocab int64, optional ...GenerateVocabRemappingAttr) (remapping tf.Output, num_present tf.Output) {
10716	if scope.Err() != nil {
10717		return
10718	}
10719	attrs := map[string]interface{}{"new_vocab_offset": new_vocab_offset, "num_new_vocab": num_new_vocab}
10720	for _, a := range optional {
10721		a(attrs)
10722	}
10723	opspec := tf.OpSpec{
10724		Type: "GenerateVocabRemapping",
10725		Input: []tf.Input{
10726			new_vocab_file, old_vocab_file,
10727		},
10728		Attrs: attrs,
10729	}
10730	op := scope.AddOperation(opspec)
10731	return op.Output(0), op.Output(1)
10732}
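
// Example (editorial sketch, not part of the generated file): running the
// remapping described in the comment above. The scope `s` is assumed and the
// vocab file paths are hypothetical placeholders.
//
// ```
// newVocab := op.Const(s, "/tmp/new_vocab.txt") // hypothetical path
// oldVocab := op.Const(s, "/tmp/old_vocab.txt") // hypothetical path
// remapping, numPresent := op.GenerateVocabRemapping(s, newVocab, oldVocab,
// 	1, // new_vocab_offset
// 	3, // num_new_vocab
// 	op.GenerateVocabRemappingOldVocabSize(-1),
// )
// _, _ = remapping, numPresent
// ```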
10733
10734// Creates a dataset that overrides the maximum intra-op parallelism.
10735//
10736// Arguments:
10737//
10738//	max_intra_op_parallelism: Identifies the maximum intra-op parallelism to use.
10739//
10740//
10741func ExperimentalMaxIntraOpParallelismDataset(scope *Scope, input_dataset tf.Output, max_intra_op_parallelism tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
10742	if scope.Err() != nil {
10743		return
10744	}
10745	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
10746	opspec := tf.OpSpec{
10747		Type: "ExperimentalMaxIntraOpParallelismDataset",
10748		Input: []tf.Input{
10749			input_dataset, max_intra_op_parallelism,
10750		},
10751		Attrs: attrs,
10752	}
10753	op := scope.AddOperation(opspec)
10754	return op.Output(0)
10755}
10756
10757// SpaceToBatch for N-D tensors of type T.
10758//
10759// This operation divides "spatial" dimensions `[1, ..., M]` of the input into a
10760// grid of blocks of shape `block_shape`, and interleaves these blocks with the
10761// "batch" dimension (0) such that in the output, the spatial dimensions
10762// `[1, ..., M]` correspond to the position within the grid, and the batch
10763// dimension combines both the position within a spatial block and the original
10764// batch position.  Prior to division into blocks, the spatial dimensions of the
10765// input are optionally zero padded according to `paddings`.  See below for a
10766// precise description.
10767//
10768// Arguments:
10769//	input: N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`,
10770// where spatial_shape has `M` dimensions.
10771//	block_shape: 1-D with shape `[M]`, all values must be >= 1.
10772//	paddings: 2-D with shape `[M, 2]`, all values must be >= 0.
10773//   `paddings[i] = [pad_start, pad_end]` specifies the padding for input dimension
10774//   `i + 1`, which corresponds to spatial dimension `i`.  It is required that
10775//   `block_shape[i]` divides `input_shape[i + 1] + pad_start + pad_end`.
10776//
10777// This operation is equivalent to the following steps:
10778//
10779// 1. Zero-pad the start and end of dimensions `[1, ..., M]` of the
10780//    input according to `paddings` to produce `padded` of shape `padded_shape`.
10781//
10782// 2. Reshape `padded` to `reshaped_padded` of shape:
10783//
10784//      [batch] +
10785//      [padded_shape[1] / block_shape[0],
10786//        block_shape[0],
10787//       ...,
10788//       padded_shape[M] / block_shape[M-1],
10789//       block_shape[M-1]] +
10790//      remaining_shape
10791//
10792// 3. Permute dimensions of `reshaped_padded` to produce
10793//    `permuted_reshaped_padded` of shape:
10794//
10795//      block_shape +
10796//      [batch] +
10797//      [padded_shape[1] / block_shape[0],
10798//       ...,
10799//       padded_shape[M] / block_shape[M-1]] +
10800//      remaining_shape
10801//
10802// 4. Reshape `permuted_reshaped_padded` to flatten `block_shape` into the batch
10803//    dimension, producing an output tensor of shape:
10804//
10805//      [batch * prod(block_shape)] +
10806//      [padded_shape[1] / block_shape[0],
10807//       ...,
10808//       padded_shape[M] / block_shape[M-1]] +
10809//      remaining_shape
10810//
10811// Some examples:
10812//
10813// (1) For the following input of shape `[1, 2, 2, 1]`, `block_shape = [2, 2]`, and
10814//     `paddings = [[0, 0], [0, 0]]`:
10815//
10816// ```
10817// x = [[[[1], [2]], [[3], [4]]]]
10818// ```
10819//
10820// The output tensor has shape `[4, 1, 1, 1]` and value:
10821//
10822// ```
10823// [[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
10824// ```
10825//
10826// (2) For the following input of shape `[1, 2, 2, 3]`, `block_shape = [2, 2]`, and
10827//     `paddings = [[0, 0], [0, 0]]`:
10828//
10829// ```
10830// x = [[[[1, 2, 3], [4, 5, 6]],
10831//       [[7, 8, 9], [10, 11, 12]]]]
10832// ```
10833//
10834// The output tensor has shape `[4, 1, 1, 3]` and value:
10835//
10836// ```
10837// [[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]]
10838// ```
10839//
10840// (3) For the following input of shape `[1, 4, 4, 1]`, `block_shape = [2, 2]`, and
10841//     `paddings = [[0, 0], [0, 0]]`:
10842//
10843// ```
10844// x = [[[[1],   [2],  [3],  [4]],
10845//       [[5],   [6],  [7],  [8]],
10846//       [[9],  [10], [11],  [12]],
10847//       [[13], [14], [15],  [16]]]]
10848// ```
10849//
10850// The output tensor has shape `[4, 2, 2, 1]` and value:
10851//
10852// ```
10853// x = [[[[1], [3]], [[9], [11]]],
10854//      [[[2], [4]], [[10], [12]]],
10855//      [[[5], [7]], [[13], [15]]],
10856//      [[[6], [8]], [[14], [16]]]]
10857// ```
10858//
10859// (4) For the following input of shape `[2, 2, 4, 1]`, block_shape = `[2, 2]`, and
10860//     paddings = `[[0, 0], [2, 0]]`:
10861//
10862// ```
10863// x = [[[[1],   [2],  [3],  [4]],
10864//       [[5],   [6],  [7],  [8]]],
10865//      [[[9],  [10], [11],  [12]],
10866//       [[13], [14], [15],  [16]]]]
10867// ```
10868//
10869// The output tensor has shape `[8, 1, 3, 1]` and value:
10870//
10871// ```
10872// x = [[[[0], [1], [3]]], [[[0], [9], [11]]],
10873//      [[[0], [2], [4]]], [[[0], [10], [12]]],
10874//      [[[0], [5], [7]]], [[[0], [13], [15]]],
10875//      [[[0], [6], [8]]], [[[0], [14], [16]]]]
10876// ```
10877//
10878// Among others, this operation is useful for reducing atrous convolution into
10879// regular convolution.
10880func SpaceToBatchND(scope *Scope, input tf.Output, block_shape tf.Output, paddings tf.Output) (output tf.Output) {
10881	if scope.Err() != nil {
10882		return
10883	}
10884	opspec := tf.OpSpec{
10885		Type: "SpaceToBatchND",
10886		Input: []tf.Input{
10887			input, block_shape, paddings,
10888		},
10889	}
10890	op := scope.AddOperation(opspec)
10891	return op.Output(0)
10892}
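
// Example (editorial sketch, not part of the generated file): a complete
// client program that runs example (1) from the comment above and prints the
// result. It follows the standard graph/session workflow of the TensorFlow Go
// API; all names and values are illustrative.
//
// ```
// package main
//
// import (
// 	"fmt"
//
// 	tf "github.com/tensorflow/tensorflow/tensorflow/go"
// 	"github.com/tensorflow/tensorflow/tensorflow/go/op"
// )
//
// func main() {
// 	s := op.NewScope()
// 	x := op.Const(s, [][][][]float32{{{{1}, {2}}, {{3}, {4}}}})
// 	blockShape := op.Const(s, []int64{2, 2})
// 	paddings := op.Const(s, [][]int64{{0, 0}, {0, 0}})
// 	out := op.SpaceToBatchND(s, x, blockShape, paddings)
//
// 	graph, err := s.Finalize()
// 	if err != nil {
// 		panic(err)
// 	}
// 	sess, err := tf.NewSession(graph, nil)
// 	if err != nil {
// 		panic(err)
// 	}
// 	defer sess.Close()
// 	res, err := sess.Run(nil, []tf.Output{out}, nil)
// 	if err != nil {
// 		panic(err)
// 	}
// 	fmt.Println(res[0].Value()) // shape [4 1 1 1], i.e. [[[[1]]] [[[2]]] [[[3]]] [[[4]]]]
// }
// ```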
10893
10894// Returns a batched diagonal tensor with given batched diagonal values.
10895//
10896// Returns a tensor with the contents in `diagonal` as `k[0]`-th to `k[1]`-th
10897// diagonals of a matrix, with everything else padded with `padding`. `num_rows`
10898// and `num_cols` specify the dimensions of the innermost matrix of the output. If
10899// neither is specified, the op assumes the innermost matrix is square and infers
10900// its size from `k` and the innermost dimension of `diagonal`. If only one of them
10901// is specified, the op assumes the unspecified value is the smallest possible
10902// based on other criteria.
10903//
10904// Let `diagonal` have `r` dimensions `[I, J, ..., L, M, N]`. The output tensor has
10905// rank `r+1` with shape `[I, J, ..., L, M, num_rows, num_cols]` when only one
10906// diagonal is given (`k` is an integer or `k[0] == k[1]`). Otherwise, it has rank
10907// `r` with shape `[I, J, ..., L, num_rows, num_cols]`.
10908//
10909// The second innermost dimension of `diagonal` has double meaning.
10910// When `k` is scalar or `k[0] == k[1]`, `M` is part of the batch size
10911// [I, J, ..., M], and the output tensor is:
10912//
10913// ```
10914// output[i, j, ..., l, m, n]
10915//   = diagonal[i, j, ..., l, n-max(d_upper, 0)] ; if n - m == d_upper
10916//     padding_value                             ; otherwise
10917// ```
10918//
10919// Otherwise, `M` is treated as the number of diagonals for the matrix in the
10920// same batch (`M = k[1]-k[0]+1`), and the output tensor is:
10921//
10922// ```
10923// output[i, j, ..., l, m, n]
10924//   = diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= k[1]
10925//     padding_value                                     ; otherwise
10926// ```
10927// where `d = n - m`, `diag_index = k[1] - d`, and `index_in_diag = n - max(d, 0)`.
10928//
10929// For example:
10930//
10931// ```
10932// # The main diagonal.
10933// diagonal = np.array([[1, 2, 3, 4],            # Input shape: (2, 4)
10934//                      [5, 6, 7, 8]])
10935// tf.matrix_diag(diagonal) ==> [[[1, 0, 0, 0],  # Output shape: (2, 4, 4)
10936//                                [0, 2, 0, 0],
10937//                                [0, 0, 3, 0],
10938//                                [0, 0, 0, 4]],
10939//                               [[5, 0, 0, 0],
10940//                                [0, 6, 0, 0],
10941//                                [0, 0, 7, 0],
10942//                                [0, 0, 0, 8]]]
10943//
10944// # A superdiagonal (per batch).
10945// diagonal = np.array([[1, 2, 3],  # Input shape: (2, 3)
10946//                      [4, 5, 6]])
10947// tf.matrix_diag(diagonal, k = 1)
10948//   ==> [[[0, 1, 0, 0],  # Output shape: (2, 4, 4)
10949//         [0, 0, 2, 0],
10950//         [0, 0, 0, 3],
10951//         [0, 0, 0, 0]],
10952//        [[0, 4, 0, 0],
10953//         [0, 0, 5, 0],
10954//         [0, 0, 0, 6],
10955//         [0, 0, 0, 0]]]
10956//
10957// # A band of diagonals.
10958// diagonals = np.array([[[1, 2, 3],  # Input shape: (2, 2, 3)
10959//                        [4, 5, 0]],
10960//                       [[6, 7, 9],
10961//                        [9, 1, 0]]])
10962// tf.matrix_diag(diagonals, k = (-1, 0))
10963//   ==> [[[1, 0, 0],  # Output shape: (2, 3, 3)
10964//         [4, 2, 0],
10965//         [0, 5, 3]],
10966//        [[6, 0, 0],
10967//         [9, 7, 0],
10968//         [0, 1, 9]]]
10969//
10970// # Rectangular matrix.
10971// diagonal = np.array([1, 2])  # Input shape: (2)
10972// tf.matrix_diag(diagonal, k = -1, num_rows = 3, num_cols = 4)
10973//   ==> [[0, 0, 0, 0],  # Output shape: (3, 4)
10974//        [1, 0, 0, 0],
10975//        [0, 2, 0, 0]]
10976//
10977// # Rectangular matrix with inferred num_cols and padding_value = 9.
10978// tf.matrix_diag(diagonal, k = -1, num_rows = 3, padding_value = 9)
10979//   ==> [[9, 9],  # Output shape: (3, 2)
10980//        [1, 9],
10981//        [9, 2]]
10982// ```
10983//
10984// Arguments:
10985//	diagonal: Rank `r`, where `r >= 1`
10986//	k: Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main
10987// diagonal, and negative value means subdiagonals. `k` can be a single integer
10988// (for a single diagonal) or a pair of integers specifying the low and high ends
10989// of a matrix band. `k[0]` must not be larger than `k[1]`.
10990//	num_rows: The number of rows of the output matrix. If it is not provided, the op assumes
10991// the output matrix is a square matrix and infers the matrix size from k and the
10992// innermost dimension of `diagonal`.
10993//	num_cols: The number of columns of the output matrix. If it is not provided, the op
10994// assumes the output matrix is a square matrix and infers the matrix size from
10995// k and the innermost dimension of `diagonal`.
10996//	padding_value: The number to fill the area outside the specified diagonal band with.
10997// Default is 0.
10998//
10999// Returns Has rank `r+1` when `k` is an integer or `k[0] == k[1]`, rank `r` otherwise.
11000func MatrixDiagV2(scope *Scope, diagonal tf.Output, k tf.Output, num_rows tf.Output, num_cols tf.Output, padding_value tf.Output) (output tf.Output) {
11001	if scope.Err() != nil {
11002		return
11003	}
11004	opspec := tf.OpSpec{
11005		Type: "MatrixDiagV2",
11006		Input: []tf.Input{
11007			diagonal, k, num_rows, num_cols, padding_value,
11008		},
11009	}
11010	op := scope.AddOperation(opspec)
11011	return op.Output(0)
11012}
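
// Example (editorial sketch, not part of the generated file): building the
// "main diagonal" case from the comment above. In this V2 op every argument,
// including `k`, `num_rows`, `num_cols`, and `padding_value`, is a tensor;
// passing -1 for the row/column counts asks the op to infer them. The scope
// `s` is assumed.
//
// ```
// diag := op.Const(s, [][]float32{{1, 2, 3, 4}, {5, 6, 7, 8}})
// k := op.Const(s, int32(0))
// inferred := op.Const(s, int32(-1))
// pad := op.Const(s, float32(0))
// out := op.MatrixDiagV2(s, diag, k, inferred, inferred, pad)
// // out has shape (2, 4, 4), matching the first example above.
// ```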
11013
11014// Creates a dataset that overrides the maximum intra-op parallelism.
11015//
11016// Arguments:
11017//
11018//	max_intra_op_parallelism: Identifies the maximum intra-op parallelism to use.
11019//
11020//
11021func MaxIntraOpParallelismDataset(scope *Scope, input_dataset tf.Output, max_intra_op_parallelism tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
11022	if scope.Err() != nil {
11023		return
11024	}
11025	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
11026	opspec := tf.OpSpec{
11027		Type: "MaxIntraOpParallelismDataset",
11028		Input: []tf.Input{
11029			input_dataset, max_intra_op_parallelism,
11030		},
11031		Attrs: attrs,
11032	}
11033	op := scope.AddOperation(opspec)
11034	return op.Output(0)
11035}
11036
11037// StageClearAttr is an optional argument to StageClear.
11038type StageClearAttr func(optionalAttr)
11039
11040// StageClearCapacity sets the optional capacity attribute to value.
11041// If not specified, defaults to 0
11042//
11043// REQUIRES: value >= 0
11044func StageClearCapacity(value int64) StageClearAttr {
11045	return func(m optionalAttr) {
11046		m["capacity"] = value
11047	}
11048}
11049
11050// StageClearMemoryLimit sets the optional memory_limit attribute to value.
11051// If not specified, defaults to 0
11052//
11053// REQUIRES: value >= 0
11054func StageClearMemoryLimit(value int64) StageClearAttr {
11055	return func(m optionalAttr) {
11056		m["memory_limit"] = value
11057	}
11058}
11059
11060// StageClearContainer sets the optional container attribute to value.
11061// If not specified, defaults to ""
11062func StageClearContainer(value string) StageClearAttr {
11063	return func(m optionalAttr) {
11064		m["container"] = value
11065	}
11066}
11067
11068// StageClearSharedName sets the optional shared_name attribute to value.
11069// If not specified, defaults to ""
11070func StageClearSharedName(value string) StageClearAttr {
11071	return func(m optionalAttr) {
11072		m["shared_name"] = value
11073	}
11074}
11075
11076// Op removes all elements in the underlying container.
11077//
11078// Returns the created operation.
11079func StageClear(scope *Scope, dtypes []tf.DataType, optional ...StageClearAttr) (o *tf.Operation) {
11080	if scope.Err() != nil {
11081		return
11082	}
11083	attrs := map[string]interface{}{"dtypes": dtypes}
11084	for _, a := range optional {
11085		a(attrs)
11086	}
11087	opspec := tf.OpSpec{
11088		Type: "StageClear",
11089
11090		Attrs: attrs,
11091	}
11092	return scope.AddOperation(opspec)
11093}
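
// Example (editorial sketch, not part of the generated file): ops that return
// a *tf.Operation rather than outputs are run as session targets. The scope
// `s`, the dtype list, and the shared name are illustrative only.
//
// ```
// clear := op.StageClear(s, []tf.DataType{tf.Float},
// 	op.StageClearSharedName("my_staging_area"), // hypothetical name
// )
// // After finalizing the graph and creating sess:
// //   _, err := sess.Run(nil, nil, []*tf.Operation{clear})
// ```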
11094
11095// Records the latency of producing `input_dataset` elements in a StatsAggregator.
11096func ExperimentalLatencyStatsDataset(scope *Scope, input_dataset tf.Output, tag tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
11097	if scope.Err() != nil {
11098		return
11099	}
11100	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
11101	opspec := tf.OpSpec{
11102		Type: "ExperimentalLatencyStatsDataset",
11103		Input: []tf.Input{
11104			input_dataset, tag,
11105		},
11106		Attrs: attrs,
11107	}
11108	op := scope.AddOperation(opspec)
11109	return op.Output(0)
11110}
11111
11112// Returns the name of the device on which `resource` has been placed.
11113func IteratorGetDevice(scope *Scope, resource tf.Output) (device tf.Output) {
11114	if scope.Err() != nil {
11115		return
11116	}
11117	opspec := tf.OpSpec{
11118		Type: "IteratorGetDevice",
11119		Input: []tf.Input{
11120			resource,
11121		},
11122	}
11123	op := scope.AddOperation(opspec)
11124	return op.Output(0)
11125}
11126
11127// Creates a Dataset that returns pseudorandom numbers.
11128//
11129// Arguments:
11130//	seed: A scalar seed for the random number generator. If either seed or
11131// seed2 is set to be non-zero, the random number generator is seeded
11132// by the given seed.  Otherwise, a random seed is used.
11133//	seed2: A second scalar seed to avoid seed collision.
11134//
11135//
11136func ExperimentalRandomDataset(scope *Scope, seed tf.Output, seed2 tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
11137	if scope.Err() != nil {
11138		return
11139	}
11140	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
11141	opspec := tf.OpSpec{
11142		Type: "ExperimentalRandomDataset",
11143		Input: []tf.Input{
11144			seed, seed2,
11145		},
11146		Attrs: attrs,
11147	}
11148	op := scope.AddOperation(opspec)
11149	return op.Output(0)
11150}
11151
11152// ExperimentalIgnoreErrorsDatasetAttr is an optional argument to ExperimentalIgnoreErrorsDataset.
11153type ExperimentalIgnoreErrorsDatasetAttr func(optionalAttr)
11154
11155// ExperimentalIgnoreErrorsDatasetLogWarning sets the optional log_warning attribute to value.
11156// If not specified, defaults to false
11157func ExperimentalIgnoreErrorsDatasetLogWarning(value bool) ExperimentalIgnoreErrorsDatasetAttr {
11158	return func(m optionalAttr) {
11159		m["log_warning"] = value
11160	}
11161}
11162
11163// Creates a dataset that contains the elements of `input_dataset` ignoring errors.
11164func ExperimentalIgnoreErrorsDataset(scope *Scope, input_dataset tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...ExperimentalIgnoreErrorsDatasetAttr) (handle tf.Output) {
11165	if scope.Err() != nil {
11166		return
11167	}
11168	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
11169	for _, a := range optional {
11170		a(attrs)
11171	}
11172	opspec := tf.OpSpec{
11173		Type: "ExperimentalIgnoreErrorsDataset",
11174		Input: []tf.Input{
11175			input_dataset,
11176		},
11177		Attrs: attrs,
11178	}
11179	op := scope.AddOperation(opspec)
11180	return op.Output(0)
11181}
11182
11183// CudnnRNNBackpropV2Attr is an optional argument to CudnnRNNBackpropV2.
11184type CudnnRNNBackpropV2Attr func(optionalAttr)
11185
11186// CudnnRNNBackpropV2RnnMode sets the optional rnn_mode attribute to value.
11187// If not specified, defaults to "lstm"
11188func CudnnRNNBackpropV2RnnMode(value string) CudnnRNNBackpropV2Attr {
11189	return func(m optionalAttr) {
11190		m["rnn_mode"] = value
11191	}
11192}
11193
11194// CudnnRNNBackpropV2InputMode sets the optional input_mode attribute to value.
11195// If not specified, defaults to "linear_input"
11196func CudnnRNNBackpropV2InputMode(value string) CudnnRNNBackpropV2Attr {
11197	return func(m optionalAttr) {
11198		m["input_mode"] = value
11199	}
11200}
11201
11202// CudnnRNNBackpropV2Direction sets the optional direction attribute to value.
11203// If not specified, defaults to "unidirectional"
11204func CudnnRNNBackpropV2Direction(value string) CudnnRNNBackpropV2Attr {
11205	return func(m optionalAttr) {
11206		m["direction"] = value
11207	}
11208}
11209
11210// CudnnRNNBackpropV2Dropout sets the optional dropout attribute to value.
11211// If not specified, defaults to 0
11212func CudnnRNNBackpropV2Dropout(value float32) CudnnRNNBackpropV2Attr {
11213	return func(m optionalAttr) {
11214		m["dropout"] = value
11215	}
11216}
11217
11218// CudnnRNNBackpropV2Seed sets the optional seed attribute to value.
11219// If not specified, defaults to 0
11220func CudnnRNNBackpropV2Seed(value int64) CudnnRNNBackpropV2Attr {
11221	return func(m optionalAttr) {
11222		m["seed"] = value
11223	}
11224}
11225
11226// CudnnRNNBackpropV2Seed2 sets the optional seed2 attribute to value.
11227// If not specified, defaults to 0
11228func CudnnRNNBackpropV2Seed2(value int64) CudnnRNNBackpropV2Attr {
11229	return func(m optionalAttr) {
11230		m["seed2"] = value
11231	}
11232}
11233
11234// Backprop step of CudnnRNN.
11235//
11236// Compute the backprop of both data and weights in an RNN. Takes an extra
11237//     "host_reserved" input compared to CudnnRNNBackprop, which is used to
11238//     determine the RNN cudnnRNNAlgo_t and cudnnMathType_t.
11239//
11240// rnn_mode: Indicates the type of the RNN model.
11241// input_mode: Indicates whether there is a linear projection between the input and
11242//     the actual computation before the first layer. 'skip_input' is only allowed
11243//     when input_size == num_units; 'auto_select' implies 'skip_input' when
11244//     input_size == num_units; otherwise, it implies 'linear_input'.
11245// direction: Indicates whether a bidirectional model will be used. Should be
11246//   "unidirectional" or "bidirectional".
11247// dropout: Dropout probability. When set to 0., dropout is disabled.
11248// seed: The 1st part of a seed to initialize dropout.
11249// seed2: The 2nd part of a seed to initialize dropout.
11250// input: A 3-D tensor with the shape of [seq_length, batch_size, input_size].
11251// input_h: A 3-D tensor with the shape of [num_layer * dir, batch_size,
11252//     num_units].
11253// input_c: For LSTM, a 3-D tensor with the shape of
11254//     [num_layer * dir, batch, num_units]. For other models, it is ignored.
11255// params: A 1-D tensor that contains the weights and biases in an opaque layout.
11256//     The size must be created through CudnnRNNParamsSize, and initialized
11257//     separately. Note that they might not be compatible across different
11258//     generations. So it is a good idea to save and restore them.
11259// output: A 3-D tensor with the shape of [seq_length, batch_size,
11260//     dir * num_units].
11261// output_h: The same shape as input_h.
11262// output_c: The same shape as input_c for LSTM. An empty tensor for other models.
11263// output_backprop: A 3-D tensor with the same shape as output in the forward pass.
11264// output_h_backprop: A 3-D tensor with the same shape as output_h in the forward
11265//     pass.
11266// output_c_backprop: A 3-D tensor with the same shape as output_c in the forward
11267//     pass.
11268// reserve_space: The same reserve_space produced in the forward operation.
11269// host_reserved: The same host_reserved produced in the forward operation.
11270// input_backprop: The backprop to input in the forward pass. Has the same shape
11271//     as input.
11272// input_h_backprop: The backprop to input_h in the forward pass. Has the same
11273//     shape as input_h.
11274// input_c_backprop: The backprop to input_c in the forward pass. Has the same
11275//     shape as input_c.
11276// params_backprop: The backprop to the params buffer in the forward pass. Has the
11277//     same shape as params.
11278func CudnnRNNBackpropV2(scope *Scope, input tf.Output, input_h tf.Output, input_c tf.Output, params tf.Output, output tf.Output, output_h tf.Output, output_c tf.Output, output_backprop tf.Output, output_h_backprop tf.Output, output_c_backprop tf.Output, reserve_space tf.Output, host_reserved tf.Output, optional ...CudnnRNNBackpropV2Attr) (input_backprop tf.Output, input_h_backprop tf.Output, input_c_backprop tf.Output, params_backprop tf.Output) {
11279	if scope.Err() != nil {
11280		return
11281	}
11282	attrs := map[string]interface{}{}
11283	for _, a := range optional {
11284		a(attrs)
11285	}
11286	opspec := tf.OpSpec{
11287		Type: "CudnnRNNBackpropV2",
11288		Input: []tf.Input{
11289			input, input_h, input_c, params, output, output_h, output_c, output_backprop, output_h_backprop, output_c_backprop, reserve_space, host_reserved,
11290		},
11291		Attrs: attrs,
11292	}
11293	op := scope.AddOperation(opspec)
11294	return op.Output(0), op.Output(1), op.Output(2), op.Output(3)
11295}
11296
11297// A substitute for `InterleaveDataset` on a fixed list of `N` datasets.
11298//
11299// Arguments:
11300//	selector_input_dataset: A dataset of scalar `DT_INT64` elements that determines which of the
11301// `N` data inputs should produce the next output element.
11302//	data_input_datasets: `N` datasets with the same type that will be interleaved according to
11303// the values of `selector_input_dataset`.
11304//
11305//
11306func DirectedInterleaveDataset(scope *Scope, selector_input_dataset tf.Output, data_input_datasets []tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
11307	if scope.Err() != nil {
11308		return
11309	}
11310	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
11311	opspec := tf.OpSpec{
11312		Type: "DirectedInterleaveDataset",
11313		Input: []tf.Input{
11314			selector_input_dataset, tf.OutputList(data_input_datasets),
11315		},
11316		Attrs: attrs,
11317	}
11318	op := scope.AddOperation(opspec)
11319	return op.Output(0)
11320}
11321
11322// Creates a dataset that batches input elements into a SparseTensor.
11323//
11324// Arguments:
11325//	input_dataset: A handle to an input dataset. Must have a single component.
11326//	batch_size: A scalar representing the number of elements to accumulate in a
11327// batch.
11328//	row_shape: A vector representing the dense shape of each row in the produced
11329// SparseTensor. The shape may be partially specified, using `-1` to indicate
11330// that a particular dimension should use the maximum size of all batch elements.
11331//
11332//
11333func ExperimentalDenseToSparseBatchDataset(scope *Scope, input_dataset tf.Output, batch_size tf.Output, row_shape tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
11334	if scope.Err() != nil {
11335		return
11336	}
11337	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
11338	opspec := tf.OpSpec{
11339		Type: "ExperimentalDenseToSparseBatchDataset",
11340		Input: []tf.Input{
11341			input_dataset, batch_size, row_shape,
11342		},
11343		Attrs: attrs,
11344	}
11345	op := scope.AddOperation(opspec)
11346	return op.Output(0)
11347}
11348
11349// Writes the given dataset to the given file using the TFRecord format.
11350//
11351// Arguments:
11352//	input_dataset: A variant tensor representing the dataset to write.
11353//	filename: A scalar string tensor representing the filename to use.
11354//	compression_type: A scalar string tensor containing either (i) the empty string (no
11355// compression), (ii) "ZLIB", or (iii) "GZIP".
11356//
11357// Returns the created operation.
11358func ExperimentalDatasetToTFRecord(scope *Scope, input_dataset tf.Output, filename tf.Output, compression_type tf.Output) (o *tf.Operation) {
11359	if scope.Err() != nil {
11360		return
11361	}
11362	opspec := tf.OpSpec{
11363		Type: "ExperimentalDatasetToTFRecord",
11364		Input: []tf.Input{
11365			input_dataset, filename, compression_type,
11366		},
11367	}
11368	return scope.AddOperation(opspec)
11369}
11370
11371// Creates a dataset from the given `graph_def`.
11372//
11373// Creates a dataset from the provided `graph_def`.
11374//
11375// Arguments:
11376//	graph_def: The graph representation of the dataset (as serialized GraphDef).
11377//
11378// Returns A variant tensor representing the dataset.
11379func DatasetFromGraph(scope *Scope, graph_def tf.Output) (handle tf.Output) {
11380	if scope.Err() != nil {
11381		return
11382	}
11383	opspec := tf.OpSpec{
11384		Type: "DatasetFromGraph",
11385		Input: []tf.Input{
11386			graph_def,
11387		},
11388	}
11389	op := scope.AddOperation(opspec)
11390	return op.Output(0)
11391}
11392
11393// Returns the cardinality of `input_dataset`.
11394//
11395// Returns the cardinality of `input_dataset`.
11396//
11397// Arguments:
11398//	input_dataset: A variant tensor representing the dataset to return cardinality for.
11399//
11400// Returns The cardinality of `input_dataset`. Named constants are used to represent
11401// infinite and unknown cardinality.
11402func ExperimentalDatasetCardinality(scope *Scope, input_dataset tf.Output) (cardinality tf.Output) {
11403	if scope.Err() != nil {
11404		return
11405	}
11406	opspec := tf.OpSpec{
11407		Type: "ExperimentalDatasetCardinality",
11408		Input: []tf.Input{
11409			input_dataset,
11410		},
11411	}
11412	op := scope.AddOperation(opspec)
11413	return op.Output(0)
11414}
11415
11416// Interleave the values from the `data` tensors into a single tensor.
11417//
11418// Builds a merged tensor such that
11419//
11420// ```python
11421//     merged[indices[m][i, ..., j], ...] = data[m][i, ..., j, ...]
11422// ```
11423//
11424// For example, if each `indices[m]` is scalar or vector, we have
11425//
11426// ```python
11427//     # Scalar indices:
11428//     merged[indices[m], ...] = data[m][...]
11429//
11430//     # Vector indices:
11431//     merged[indices[m][i], ...] = data[m][i, ...]
11432// ```
11433//
11434// Each `data[i].shape` must start with the corresponding `indices[i].shape`,
11435// and the rest of `data[i].shape` must be constant w.r.t. `i`.  That is, we
11436// must have `data[i].shape = indices[i].shape + constant`.  In terms of this
11437// `constant`, the output shape is
11438//
11439//     merged.shape = [max(indices)] + constant
11440//
11441// Values are merged in order, so if an index appears in both `indices[m][i]` and
11442// `indices[n][j]` for `(m,i) < (n,j)` the slice `data[n][j]` will appear in the
11443// merged result. If you do not need this guarantee, ParallelDynamicStitch might
11444// perform better on some devices.
11445//
11446// For example:
11447//
11448// ```python
11449//     indices[0] = 6
11450//     indices[1] = [4, 1]
11451//     indices[2] = [[5, 2], [0, 3]]
11452//     data[0] = [61, 62]
11453//     data[1] = [[41, 42], [11, 12]]
11454//     data[2] = [[[51, 52], [21, 22]], [[1, 2], [31, 32]]]
11455//     merged = [[1, 2], [11, 12], [21, 22], [31, 32], [41, 42],
11456//               [51, 52], [61, 62]]
11457// ```
11458//
11459// This method can be used to merge partitions created by `dynamic_partition`
11460// as illustrated on the following example:
11461//
11462// ```python
11463//     # Apply function (increments x_i) on elements for which a certain condition
11464//     # applies (x_i != -1 in this example).
11465//     x=tf.constant([0.1, -1., 5.2, 4.3, -1., 7.4])
11466//     condition_mask=tf.not_equal(x,tf.constant(-1.))
11467//     partitioned_data = tf.dynamic_partition(
11468//         x, tf.cast(condition_mask, tf.int32) , 2)
11469//     partitioned_data[1] = partitioned_data[1] + 1.0
11470//     condition_indices = tf.dynamic_partition(
11471//         tf.range(tf.shape(x)[0]), tf.cast(condition_mask, tf.int32) , 2)
11472//     x = tf.dynamic_stitch(condition_indices, partitioned_data)
11473//     # Here x=[1.1, -1., 6.2, 5.3, -1, 8.4], the -1. values remain
11474//     # unchanged.
11475// ```
11476//
11477// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
11478// <img style="width:100%" src="https://www.tensorflow.org/images/DynamicStitch.png" alt>
11479// </div>
11480func DynamicStitch(scope *Scope, indices []tf.Output, data []tf.Output) (merged tf.Output) {
11481	if scope.Err() != nil {
11482		return
11483	}
11484	opspec := tf.OpSpec{
11485		Type: "DynamicStitch",
11486		Input: []tf.Input{
11487			tf.OutputList(indices), tf.OutputList(data),
11488		},
11489	}
11490	op := scope.AddOperation(opspec)
11491	return op.Output(0)
11492}
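
// Example (editorial sketch, not part of the generated file): the
// scalar/vector case from the Python snippet above, expressed through this
// wrapper. The scope `s` is assumed; values are illustrative.
//
// ```
// indices := []tf.Output{
// 	op.Const(s, []int32{0, 2}),
// 	op.Const(s, []int32{1}),
// }
// data := []tf.Output{
// 	op.Const(s, []float32{10, 30}),
// 	op.Const(s, []float32{20}),
// }
// merged := op.DynamicStitch(s, indices, data)
// // merged evaluates to [10, 20, 30].
// ```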
11493
11494// RaggedCountSparseOutputAttr is an optional argument to RaggedCountSparseOutput.
11495type RaggedCountSparseOutputAttr func(optionalAttr)
11496
11497// RaggedCountSparseOutputMinlength sets the optional minlength attribute to value.
11498//
11499// value: Minimum value to count. Can be set to -1 for no minimum.
11500// If not specified, defaults to -1
11501//
11502// REQUIRES: value >= -1
11503func RaggedCountSparseOutputMinlength(value int64) RaggedCountSparseOutputAttr {
11504	return func(m optionalAttr) {
11505		m["minlength"] = value
11506	}
11507}
11508
11509// RaggedCountSparseOutputMaxlength sets the optional maxlength attribute to value.
11510//
11511// value: Maximum value to count. Can be set to -1 for no maximum.
11512// If not specified, defaults to -1
11513//
11514// REQUIRES: value >= -1
11515func RaggedCountSparseOutputMaxlength(value int64) RaggedCountSparseOutputAttr {
11516	return func(m optionalAttr) {
11517		m["maxlength"] = value
11518	}
11519}
11520
11521// Performs sparse-output bin counting for a ragged tensor input.
11522//
11523//   Counts the number of times each value occurs in the input.
11524//
11525// Arguments:
11526//	splits: Tensor containing the row splits of the ragged tensor to count.
11527//	values: Tensor containing values of the sparse tensor to count.
11528//	weights: A Tensor of the same shape as indices containing per-index weight values.
11529// May also be the empty tensor if no weights are used.
11530//	binary_output: Whether to output the number of occurrences of each value or 1.
11531//
11532// Returns:
11533//	output_indices: Indices tensor for the resulting sparse tensor object.
11534//	output_values: Values tensor for the resulting sparse tensor object.
11535//	output_dense_shape: Shape tensor for the resulting sparse tensor object.
11542func RaggedCountSparseOutput(scope *Scope, splits tf.Output, values tf.Output, weights tf.Output, binary_output bool, optional ...RaggedCountSparseOutputAttr) (output_indices tf.Output, output_values tf.Output, output_dense_shape tf.Output) {
11543	if scope.Err() != nil {
11544		return
11545	}
11546	attrs := map[string]interface{}{"binary_output": binary_output}
11547	for _, a := range optional {
11548		a(attrs)
11549	}
11550	opspec := tf.OpSpec{
11551		Type: "RaggedCountSparseOutput",
11552		Input: []tf.Input{
11553			splits, values, weights,
11554		},
11555		Attrs: attrs,
11556	}
11557	op := scope.AddOperation(opspec)
11558	return op.Output(0), op.Output(1), op.Output(2)
11559}
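
// Example (editorial sketch, not part of the generated file): unweighted
// counting over a two-row ragged tensor. An empty weights tensor means no
// weighting; the scope `s` and all values are illustrative.
//
// ```
// splits := op.Const(s, []int64{0, 2, 5})
// values := op.Const(s, []int64{1, 1, 2, 3, 3})
// weights := op.Const(s, []int64{}) // empty: unweighted
// idx, vals, shape := op.RaggedCountSparseOutput(s, splits, values, weights,
// 	false, // binary_output: emit counts rather than 0/1
// )
// _, _, _ = idx, vals, shape
// ```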
11560
11561// Gets the next output from the given iterator.
11562func IteratorGetNext(scope *Scope, iterator tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (components []tf.Output) {
11563	if scope.Err() != nil {
11564		return
11565	}
11566	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
11567	opspec := tf.OpSpec{
11568		Type: "IteratorGetNext",
11569		Input: []tf.Input{
11570			iterator,
11571		},
11572		Attrs: attrs,
11573	}
11574	op := scope.AddOperation(opspec)
11575	if scope.Err() != nil {
11576		return
11577	}
11578	var idx int
11579	var err error
11580	if components, idx, err = makeOutputList(op, idx, "components"); err != nil {
11581		scope.UpdateErr("IteratorGetNext", err)
11582		return
11583	}
11584	return components
11585}
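
// Example (editorial sketch, not part of the generated file): consuming a
// multi-output op via the returned slice. The scope `s` and the `iterator`
// handle are assumed; the component types/shapes must match the dataset's
// element structure and are illustrative here.
//
// ```
// components := op.IteratorGetNext(s, iterator,
// 	[]tf.DataType{tf.Int64, tf.String},
// 	[]tf.Shape{tf.ScalarShape(), tf.ScalarShape()},
// )
// // components[0] and components[1] are the element's two fields; fetch them
// // with sess.Run(nil, components, nil) once the graph is finalized.
// ```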
11586
11587// Computes the static batch size of a dataset sans partial batches.
11588func ComputeBatchSize(scope *Scope, input_dataset tf.Output) (batch_size tf.Output) {
11589	if scope.Err() != nil {
11590		return
11591	}
11592	opspec := tf.OpSpec{
11593		Type: "ComputeBatchSize",
11594		Input: []tf.Input{
11595			input_dataset,
11596		},
11597	}
11598	op := scope.AddOperation(opspec)
11599	return op.Output(0)
11600}
11601
11602// Uncompresses a compressed dataset element.
11603func UncompressElement(scope *Scope, compressed tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (components []tf.Output) {
11604	if scope.Err() != nil {
11605		return
11606	}
11607	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
11608	opspec := tf.OpSpec{
11609		Type: "UncompressElement",
11610		Input: []tf.Input{
11611			compressed,
11612		},
11613		Attrs: attrs,
11614	}
11615	op := scope.AddOperation(opspec)
11616	if scope.Err() != nil {
11617		return
11618	}
11619	var idx int
11620	var err error
11621	if components, idx, err = makeOutputList(op, idx, "components"); err != nil {
11622		scope.UpdateErr("UncompressElement", err)
11623		return
11624	}
11625	return components
11626}
11627
11628// Records the bytes size of each element of `input_dataset` in a StatsAggregator.
11629func BytesProducedStatsDataset(scope *Scope, input_dataset tf.Output, tag tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
11630	if scope.Err() != nil {
11631		return
11632	}
11633	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
11634	opspec := tf.OpSpec{
11635		Type: "BytesProducedStatsDataset",
11636		Input: []tf.Input{
11637			input_dataset, tag,
11638		},
11639		Attrs: attrs,
11640	}
11641	op := scope.AddOperation(opspec)
11642	return op.Output(0)
11643}
11644
11645// ExperimentalAutoShardDatasetAttr is an optional argument to ExperimentalAutoShardDataset.
11646type ExperimentalAutoShardDatasetAttr func(optionalAttr)
11647
11648// ExperimentalAutoShardDatasetAutoShardPolicy sets the optional auto_shard_policy attribute to value.
11649// If not specified, defaults to 0
11650func ExperimentalAutoShardDatasetAutoShardPolicy(value int64) ExperimentalAutoShardDatasetAttr {
11651	return func(m optionalAttr) {
11652		m["auto_shard_policy"] = value
11653	}
11654}
11655
11656// Creates a dataset that shards the input dataset.
11657//
11658// Creates a dataset that shards the input dataset by num_workers, returning a
11659// sharded dataset for the index-th worker. This attempts to automatically shard
11660// a dataset by examining the Dataset graph and inserting a shard op before the
11661// inputs to a reader Dataset (e.g. CSVDataset, TFRecordDataset).
11662//
11663// This dataset will throw a NotFound error if we cannot shard the dataset
11664// automatically.
11665//
11666// Arguments:
11667//	input_dataset: A variant tensor representing the input dataset.
11668//	num_workers: A scalar representing the number of workers to distribute this dataset across.
11669//	index: A scalar representing the index of the current worker out of num_workers.
11670//
11671//
11672func ExperimentalAutoShardDataset(scope *Scope, input_dataset tf.Output, num_workers tf.Output, index tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...ExperimentalAutoShardDatasetAttr) (handle tf.Output) {
11673	if scope.Err() != nil {
11674		return
11675	}
11676	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
11677	for _, a := range optional {
11678		a(attrs)
11679	}
11680	opspec := tf.OpSpec{
11681		Type: "ExperimentalAutoShardDataset",
11682		Input: []tf.Input{
11683			input_dataset, num_workers, index,
11684		},
11685		Attrs: attrs,
11686	}
11687	op := scope.AddOperation(opspec)
11688	return op.Output(0)
11689}
11690
11691// A transformation that asserts which transformations happen next.
11692//
11693// This transformation checks whether the camel-case names (e.g. "FlatMap", not
11694// "flat_map") of the transformations following this transformation match the list
11695// of names in the `transformations` argument. If there is a mismatch, the
11696// transformation raises an exception.
11697//
11698// The check occurs when iterating over the contents of the dataset, which
11699// means that the check happens *after* any static optimizations are applied
11700// to the dataset graph.
11701//
11702// Arguments:
11703//	input_dataset: A variant tensor representing the input dataset.
11704// `AssertNextDataset` passes through the outputs of its input dataset.
11705//	transformations: A `tf.string` vector `tf.Tensor` identifying the transformations that are
11706// expected to happen next.
11707//
11708//
11709func AssertNextDataset(scope *Scope, input_dataset tf.Output, transformations tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
11710	if scope.Err() != nil {
11711		return
11712	}
11713	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
11714	opspec := tf.OpSpec{
11715		Type: "AssertNextDataset",
11716		Input: []tf.Input{
11717			input_dataset, transformations,
11718		},
11719		Attrs: attrs,
11720	}
11721	op := scope.AddOperation(opspec)
11722	return op.Output(0)
11723}
11724
11725// Return the index of device the op runs.
11726//
11727// Given a list of device names, this operation returns the index of the device
11728// on which this op runs. The length of the list is returned in two cases:
11729// (1) the device does not exist in the given device list, or
11730// (2) the op is being compiled with XLA.
11731func DeviceIndex(scope *Scope, device_names []string) (index tf.Output) {
11732	if scope.Err() != nil {
11733		return
11734	}
11735	attrs := map[string]interface{}{"device_names": device_names}
11736	opspec := tf.OpSpec{
11737		Type: "DeviceIndex",
11738
11739		Attrs: attrs,
11740	}
11741	op := scope.AddOperation(opspec)
11742	return op.Output(0)
11743}
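
// Example (editorial sketch, not part of the generated file): querying which
// of two devices the op was placed on. The scope `s` and the device names are
// illustrative.
//
// ```
// idx := op.DeviceIndex(s, []string{
// 	"/device:CPU:0",
// 	"/device:GPU:0",
// })
// // idx evaluates to 0 or 1, or to 2 (the list length) if the op's device is
// // not in the list or the graph is compiled with XLA.
// ```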
11744
11745// ShardDatasetAttr is an optional argument to ShardDataset.
11746type ShardDatasetAttr func(optionalAttr)
11747
11748// ShardDatasetRequireNonEmpty sets the optional require_non_empty attribute to value.
11749// If not specified, defaults to false
11750func ShardDatasetRequireNonEmpty(value bool) ShardDatasetAttr {
11751	return func(m optionalAttr) {
11752		m["require_non_empty"] = value
11753	}
11754}
11755
11756// Creates a `Dataset` that includes only 1/`num_shards` of this dataset.
11757//
11758// Arguments:
11759//
11760//	num_shards: An integer representing the number of shards operating in parallel.
11761//	index: An integer representing the current worker index.
11762//
11763//
11764func ShardDataset(scope *Scope, input_dataset tf.Output, num_shards tf.Output, index tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...ShardDatasetAttr) (handle tf.Output) {
11765	if scope.Err() != nil {
11766		return
11767	}
11768	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
11769	for _, a := range optional {
11770		a(attrs)
11771	}
11772	opspec := tf.OpSpec{
11773		Type: "ShardDataset",
11774		Input: []tf.Input{
11775			input_dataset, num_shards, index,
11776		},
11777		Attrs: attrs,
11778	}
11779	op := scope.AddOperation(opspec)
11780	return op.Output(0)
11781}
11782
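// The following is a hand-written sketch, not generated code. It assumes the
// `RangeDataset` wrapper from this package is available; the shapes, shard
// count, and worker index are illustrative.
//
// ```go
// s := op.NewScope()
// outTypes := []tf.DataType{tf.Int64}
// outShapes := []tf.Shape{tf.ScalarShape()}
// ds := op.RangeDataset(s,
// 	op.Const(s.SubScope("start"), int64(0)),
// 	op.Const(s.SubScope("stop"), int64(100)),
// 	op.Const(s.SubScope("step"), int64(1)),
// 	outTypes, outShapes)
// // Keep every 4th element, starting at this worker's index (1).
// shard := op.ShardDataset(s, ds,
// 	op.Const(s.SubScope("num_shards"), int64(4)),
// 	op.Const(s.SubScope("index"), int64(1)),
// 	outTypes, outShapes,
// 	op.ShardDatasetRequireNonEmpty(true))
// _ = shard
// ```
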
11783// NonMaxSuppressionV5Attr is an optional argument to NonMaxSuppressionV5.
11784type NonMaxSuppressionV5Attr func(optionalAttr)
11785
11786// NonMaxSuppressionV5PadToMaxOutputSize sets the optional pad_to_max_output_size attribute to value.
11787//
11788// value: If true, the output `selected_indices` is padded to be of length
11789// `max_output_size`. Defaults to false.
11790// If not specified, defaults to false
11791func NonMaxSuppressionV5PadToMaxOutputSize(value bool) NonMaxSuppressionV5Attr {
11792	return func(m optionalAttr) {
11793		m["pad_to_max_output_size"] = value
11794	}
11795}
11796
11797// Greedily selects a subset of bounding boxes in descending order of score,
11798//
11799// pruning away boxes that have high intersection-over-union (IOU) overlap
11800// with previously selected boxes.  Bounding boxes with score less than
11801// `score_threshold` are removed.  Bounding boxes are supplied as
11802// [y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any
11803// diagonal pair of box corners and the coordinates can be provided as normalized
11804// (i.e., lying in the interval [0, 1]) or absolute.  Note that this algorithm
11805// is agnostic to where the origin is in the coordinate system and more
11806// generally is invariant to orthogonal transformations and translations
11807// of the coordinate system; thus translations or reflections of the coordinate
11808// system result in the same boxes being selected by the algorithm.
11809// The output of this operation is a set of integers indexing into the input
11810// collection of bounding boxes representing the selected boxes.  The bounding
11811// box coordinates corresponding to the selected indices can then be obtained
11812// using the `tf.gather` operation.  For example:
11813//   selected_indices = tf.image.non_max_suppression_v2(
11814//       boxes, scores, max_output_size, iou_threshold, score_threshold)
11815//   selected_boxes = tf.gather(boxes, selected_indices)
11816// This op also supports a Soft-NMS (with Gaussian weighting) mode (c.f.
11817// Bodla et al, https://arxiv.org/abs/1704.04503) where boxes reduce the score
11818// of other overlapping boxes instead of directly causing them to be pruned.
11819// To enable this Soft-NMS mode, set the `soft_nms_sigma` parameter to be
11820// larger than 0.
11821//
11822// Arguments:
11823//	boxes: A 2-D float tensor of shape `[num_boxes, 4]`.
11824//	scores: A 1-D float tensor of shape `[num_boxes]` representing a single
11825// score corresponding to each box (each row of boxes).
11826//	max_output_size: A scalar integer tensor representing the maximum number of
11827// boxes to be selected by non max suppression.
11828//	iou_threshold: A 0-D float tensor representing the threshold for deciding whether
11829// boxes overlap too much with respect to IOU.
11830//	score_threshold: A 0-D float tensor representing the threshold for deciding when to remove
11831// boxes based on score.
11832//	soft_nms_sigma: A 0-D float tensor representing the sigma parameter for Soft NMS; see Bodla et
11833// al (c.f. https://arxiv.org/abs/1704.04503).  When `soft_nms_sigma=0.0` (which
11834// is the default), we fall back to standard (hard) NMS.
11835//
11836// Returns:
11837//	selected_indices: A 1-D integer tensor of shape `[M]` representing the selected
11838// indices from the boxes tensor, where `M <= max_output_size`.
11839//	selected_scores: A 1-D float tensor of shape `[M]` representing the corresponding
11840// scores for each selected box, where `M <= max_output_size`.  Scores only differ
11841// from corresponding input scores when using Soft NMS (i.e. when
11842// `soft_nms_sigma>0`).
11843//	valid_outputs: A 0-D integer tensor representing the number of valid elements in
11844// `selected_indices`, with the valid elements appearing first.
11845func NonMaxSuppressionV5(scope *Scope, boxes tf.Output, scores tf.Output, max_output_size tf.Output, iou_threshold tf.Output, score_threshold tf.Output, soft_nms_sigma tf.Output, optional ...NonMaxSuppressionV5Attr) (selected_indices tf.Output, selected_scores tf.Output, valid_outputs tf.Output) {
11846	if scope.Err() != nil {
11847		return
11848	}
11849	attrs := map[string]interface{}{}
11850	for _, a := range optional {
11851		a(attrs)
11852	}
11853	opspec := tf.OpSpec{
11854		Type: "NonMaxSuppressionV5",
11855		Input: []tf.Input{
11856			boxes, scores, max_output_size, iou_threshold, score_threshold, soft_nms_sigma,
11857		},
11858		Attrs: attrs,
11859	}
11860	op := scope.AddOperation(opspec)
11861	return op.Output(0), op.Output(1), op.Output(2)
11862}
11863
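// The following is a hand-written sketch, not generated code; the boxes,
// scores, and thresholds are illustrative values.
//
// ```go
// s := op.NewScope()
// boxes := op.Const(s.SubScope("boxes"), [][]float32{
// 	{0, 0, 1, 1},
// 	{0, 0.1, 1, 1.1},
// 	{0, 0.9, 1, 2},
// })
// scores := op.Const(s.SubScope("scores"), []float32{0.9, 0.75, 0.6})
// selected, selScores, numValid := op.NonMaxSuppressionV5(s,
// 	boxes, scores,
// 	op.Const(s.SubScope("max_out"), int32(3)),
// 	op.Const(s.SubScope("iou"), float32(0.5)),
// 	op.Const(s.SubScope("score"), float32(0)),
// 	op.Const(s.SubScope("sigma"), float32(0)), // 0 selects standard (hard) NMS
// 	op.NonMaxSuppressionV5PadToMaxOutputSize(true))
// _, _, _ = selected, selScores, numValid
// ```
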
11864// NonMaxSuppressionV4Attr is an optional argument to NonMaxSuppressionV4.
11865type NonMaxSuppressionV4Attr func(optionalAttr)
11866
11867// NonMaxSuppressionV4PadToMaxOutputSize sets the optional pad_to_max_output_size attribute to value.
11868//
11869// value: If true, the output `selected_indices` is padded to be of length
11870// `max_output_size`. Defaults to false.
11871// If not specified, defaults to false
11872func NonMaxSuppressionV4PadToMaxOutputSize(value bool) NonMaxSuppressionV4Attr {
11873	return func(m optionalAttr) {
11874		m["pad_to_max_output_size"] = value
11875	}
11876}
11877
11878// Greedily selects a subset of bounding boxes in descending order of score,
11879//
11880// pruning away boxes that have high intersection-over-union (IOU) overlap
11881// with previously selected boxes.  Bounding boxes with score less than
11882// `score_threshold` are removed.  Bounding boxes are supplied as
11883// [y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any
11884// diagonal pair of box corners and the coordinates can be provided as normalized
11885// (i.e., lying in the interval [0, 1]) or absolute.  Note that this algorithm
11886// is agnostic to where the origin is in the coordinate system and more
11887// generally is invariant to orthogonal transformations and translations
11888// of the coordinate system; thus translations or reflections of the coordinate
11889// system result in the same boxes being selected by the algorithm.
11890// The output of this operation is a set of integers indexing into the input
11891// collection of bounding boxes representing the selected boxes.  The bounding
11892// box coordinates corresponding to the selected indices can then be obtained
11893// using the `tf.gather` operation.  For example:
11894//   selected_indices = tf.image.non_max_suppression_v2(
11895//       boxes, scores, max_output_size, iou_threshold, score_threshold)
11896//   selected_boxes = tf.gather(boxes, selected_indices)
11897//
11898// Arguments:
11899//	boxes: A 2-D float tensor of shape `[num_boxes, 4]`.
11900//	scores: A 1-D float tensor of shape `[num_boxes]` representing a single
11901// score corresponding to each box (each row of boxes).
11902//	max_output_size: A scalar integer tensor representing the maximum number of
11903// boxes to be selected by non max suppression.
11904//	iou_threshold: A 0-D float tensor representing the threshold for deciding whether
11905// boxes overlap too much with respect to IOU.
11906//	score_threshold: A 0-D float tensor representing the threshold for deciding when to remove
11907// boxes based on score.
11908//
11909// Returns:
11910//	selected_indices: A 1-D integer tensor of shape `[M]` representing the selected
11911// indices from the boxes tensor, where `M <= max_output_size`.
11912//	valid_outputs: A 0-D integer tensor representing the number of valid elements in
11913// `selected_indices`, with the valid elements appearing first.
11914func NonMaxSuppressionV4(scope *Scope, boxes tf.Output, scores tf.Output, max_output_size tf.Output, iou_threshold tf.Output, score_threshold tf.Output, optional ...NonMaxSuppressionV4Attr) (selected_indices tf.Output, valid_outputs tf.Output) {
11915	if scope.Err() != nil {
11916		return
11917	}
11918	attrs := map[string]interface{}{}
11919	for _, a := range optional {
11920		a(attrs)
11921	}
11922	opspec := tf.OpSpec{
11923		Type: "NonMaxSuppressionV4",
11924		Input: []tf.Input{
11925			boxes, scores, max_output_size, iou_threshold, score_threshold,
11926		},
11927		Attrs: attrs,
11928	}
11929	op := scope.AddOperation(opspec)
11930	return op.Output(0), op.Output(1)
11931}
11932
11933// Greedily selects a subset of bounding boxes in descending order of score,
11934//
11935// pruning away boxes that have high intersection-over-union (IOU) overlap
11936// with previously selected boxes.  Bounding boxes with score less than
11937// `score_threshold` are removed.  Bounding boxes are supplied as
11938// [y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any
11939// diagonal pair of box corners and the coordinates can be provided as normalized
11940// (i.e., lying in the interval [0, 1]) or absolute.  Note that this algorithm
11941// is agnostic to where the origin is in the coordinate system and more
11942// generally is invariant to orthogonal transformations and translations
11943// of the coordinate system; thus translations or reflections of the coordinate
11944// system result in the same boxes being selected by the algorithm.
11945// The output of this operation is a set of integers indexing into the input
11946// collection of bounding boxes representing the selected boxes.  The bounding
11947// box coordinates corresponding to the selected indices can then be obtained
11948// using the `tf.gather` operation.  For example:
11949//   selected_indices = tf.image.non_max_suppression_v2(
11950//       boxes, scores, max_output_size, iou_threshold, score_threshold)
11951//   selected_boxes = tf.gather(boxes, selected_indices)
11952//
11953// Arguments:
11954//	boxes: A 2-D float tensor of shape `[num_boxes, 4]`.
11955//	scores: A 1-D float tensor of shape `[num_boxes]` representing a single
11956// score corresponding to each box (each row of boxes).
11957//	max_output_size: A scalar integer tensor representing the maximum number of
11958// boxes to be selected by non max suppression.
11959//	iou_threshold: A 0-D float tensor representing the threshold for deciding whether
11960// boxes overlap too much with respect to IOU.
11961//	score_threshold: A 0-D float tensor representing the threshold for deciding when to remove
11962// boxes based on score.
11963//
11964// Returns A 1-D integer tensor of shape `[M]` representing the selected
11965// indices from the boxes tensor, where `M <= max_output_size`.
11966func NonMaxSuppressionV3(scope *Scope, boxes tf.Output, scores tf.Output, max_output_size tf.Output, iou_threshold tf.Output, score_threshold tf.Output) (selected_indices tf.Output) {
11967	if scope.Err() != nil {
11968		return
11969	}
11970	opspec := tf.OpSpec{
11971		Type: "NonMaxSuppressionV3",
11972		Input: []tf.Input{
11973			boxes, scores, max_output_size, iou_threshold, score_threshold,
11974		},
11975	}
11976	op := scope.AddOperation(opspec)
11977	return op.Output(0)
11978}
11979
11980// Greedily selects a subset of bounding boxes in descending order of score,
11981//
11982// pruning away boxes that have high intersection-over-union (IOU) overlap
11983// with previously selected boxes.  Bounding boxes are supplied as
11984// [y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any
11985// diagonal pair of box corners and the coordinates can be provided as normalized
11986// (i.e., lying in the interval [0, 1]) or absolute.  Note that this algorithm
11987// is agnostic to where the origin is in the coordinate system.  Note that this
11988// algorithm is invariant to orthogonal transformations and translations
11989// of the coordinate system; thus translations or reflections of the coordinate
11990// system result in the same boxes being selected by the algorithm.
11991//
11992// The output of this operation is a set of integers indexing into the input
11993// collection of bounding boxes representing the selected boxes.  The bounding
11994// box coordinates corresponding to the selected indices can then be obtained
11995// using the `tf.gather` operation.  For example:
11996//
11997//   selected_indices = tf.image.non_max_suppression_v2(
11998//       boxes, scores, max_output_size, iou_threshold)
11999//   selected_boxes = tf.gather(boxes, selected_indices)
12000//
12001// Arguments:
12002//	boxes: A 2-D float tensor of shape `[num_boxes, 4]`.
12003//	scores: A 1-D float tensor of shape `[num_boxes]` representing a single
12004// score corresponding to each box (each row of boxes).
12005//	max_output_size: A scalar integer tensor representing the maximum number of
12006// boxes to be selected by non max suppression.
12007//	iou_threshold: A 0-D float tensor representing the threshold for deciding whether
12008// boxes overlap too much with respect to IOU.
12009//
12010// Returns A 1-D integer tensor of shape `[M]` representing the selected
12011// indices from the boxes tensor, where `M <= max_output_size`.
12012func NonMaxSuppressionV2(scope *Scope, boxes tf.Output, scores tf.Output, max_output_size tf.Output, iou_threshold tf.Output) (selected_indices tf.Output) {
12013	if scope.Err() != nil {
12014		return
12015	}
12016	opspec := tf.OpSpec{
12017		Type: "NonMaxSuppressionV2",
12018		Input: []tf.Input{
12019			boxes, scores, max_output_size, iou_threshold,
12020		},
12021	}
12022	op := scope.AddOperation(opspec)
12023	return op.Output(0)
12024}
12025
12026// NonMaxSuppressionAttr is an optional argument to NonMaxSuppression.
12027type NonMaxSuppressionAttr func(optionalAttr)
12028
12029// NonMaxSuppressionIouThreshold sets the optional iou_threshold attribute to value.
12030//
12031// value: A float representing the threshold for deciding whether boxes
12032// overlap too much with respect to IOU.
12033// If not specified, defaults to 0.5
12034func NonMaxSuppressionIouThreshold(value float32) NonMaxSuppressionAttr {
12035	return func(m optionalAttr) {
12036		m["iou_threshold"] = value
12037	}
12038}
12039
12040// Greedily selects a subset of bounding boxes in descending order of score,
12041//
12042// pruning away boxes that have high intersection-over-union (IOU) overlap
12043// with previously selected boxes.  Bounding boxes are supplied as
12044// [y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any
12045// diagonal pair of box corners and the coordinates can be provided as normalized
12046// (i.e., lying in the interval [0, 1]) or absolute.  Note that this algorithm
12047// is agnostic to where the origin is in the coordinate system.  Note that this
12048// algorithm is invariant to orthogonal transformations and translations
12049// of the coordinate system; thus translations or reflections of the coordinate
12050// system result in the same boxes being selected by the algorithm.
12051// The output of this operation is a set of integers indexing into the input
12052// collection of bounding boxes representing the selected boxes.  The bounding
12053// box coordinates corresponding to the selected indices can then be obtained
12054// using the `tf.gather` operation.  For example:
12055//   selected_indices = tf.image.non_max_suppression(
12056//       boxes, scores, max_output_size, iou_threshold)
12057//   selected_boxes = tf.gather(boxes, selected_indices)
12058//
12059// Arguments:
12060//	boxes: A 2-D float tensor of shape `[num_boxes, 4]`.
12061//	scores: A 1-D float tensor of shape `[num_boxes]` representing a single
12062// score corresponding to each box (each row of boxes).
12063//	max_output_size: A scalar integer tensor representing the maximum number of
12064// boxes to be selected by non max suppression.
12065//
12066// Returns A 1-D integer tensor of shape `[M]` representing the selected
12067// indices from the boxes tensor, where `M <= max_output_size`.
12068func NonMaxSuppression(scope *Scope, boxes tf.Output, scores tf.Output, max_output_size tf.Output, optional ...NonMaxSuppressionAttr) (selected_indices tf.Output) {
12069	if scope.Err() != nil {
12070		return
12071	}
12072	attrs := map[string]interface{}{}
12073	for _, a := range optional {
12074		a(attrs)
12075	}
12076	opspec := tf.OpSpec{
12077		Type: "NonMaxSuppression",
12078		Input: []tf.Input{
12079			boxes, scores, max_output_size,
12080		},
12081		Attrs: attrs,
12082	}
12083	op := scope.AddOperation(opspec)
12084	return op.Output(0)
12085}
12086
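// The following is a hand-written sketch, not generated code, showing how the
// functional-option attribute is passed; `boxes` and `scores` stand for
// outputs built elsewhere in the graph.
//
// ```go
// s := op.NewScope()
// selected := op.NonMaxSuppression(s,
// 	boxes,  // tf.Output of shape [num_boxes, 4]
// 	scores, // tf.Output of shape [num_boxes]
// 	op.Const(s.SubScope("max_out"), int32(10)),
// 	op.NonMaxSuppressionIouThreshold(0.6)) // overrides the 0.5 default
// _ = selected
// ```
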
12087// CropAndResizeGradBoxesAttr is an optional argument to CropAndResizeGradBoxes.
12088type CropAndResizeGradBoxesAttr func(optionalAttr)
12089
12090// CropAndResizeGradBoxesMethod sets the optional method attribute to value.
12091//
12092// value: A string specifying the interpolation method. Only 'bilinear' is
12093// supported for now.
12094// If not specified, defaults to "bilinear"
12095func CropAndResizeGradBoxesMethod(value string) CropAndResizeGradBoxesAttr {
12096	return func(m optionalAttr) {
12097		m["method"] = value
12098	}
12099}
12100
12101// Computes the gradient of the crop_and_resize op wrt the input boxes tensor.
12102//
12103// Arguments:
12104//	grads: A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`.
12105//	image: A 4-D tensor of shape `[batch, image_height, image_width, depth]`.
12106// Both `image_height` and `image_width` need to be positive.
12107//	boxes: A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor
12108// specifies the coordinates of a box in the `box_ind[i]` image and is specified
12109// in normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of
12110// `y` is mapped to the image coordinate at `y * (image_height - 1)`, so the
12111// `[0, 1]` interval of normalized image height is mapped to
12112// `[0, image_height - 1]` in image height coordinates. We do allow y1 > y2, in
12113// which case the sampled crop is an up-down flipped version of the original
12114// image. The width dimension is treated similarly. Normalized coordinates
12115// outside the `[0, 1]` range are allowed, in which case we use
12116// `extrapolation_value` to extrapolate the input image values.
12117//	box_ind: A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`.
12118// The value of `box_ind[i]` specifies the image that the `i`-th box refers to.
12119//
12120// Returns A 2-D tensor of shape `[num_boxes, 4]`.
12121func CropAndResizeGradBoxes(scope *Scope, grads tf.Output, image tf.Output, boxes tf.Output, box_ind tf.Output, optional ...CropAndResizeGradBoxesAttr) (output tf.Output) {
12122	if scope.Err() != nil {
12123		return
12124	}
12125	attrs := map[string]interface{}{}
12126	for _, a := range optional {
12127		a(attrs)
12128	}
12129	opspec := tf.OpSpec{
12130		Type: "CropAndResizeGradBoxes",
12131		Input: []tf.Input{
12132			grads, image, boxes, box_ind,
12133		},
12134		Attrs: attrs,
12135	}
12136	op := scope.AddOperation(opspec)
12137	return op.Output(0)
12138}
12139
12140// ExtractGlimpseV2Attr is an optional argument to ExtractGlimpseV2.
12141type ExtractGlimpseV2Attr func(optionalAttr)
12142
12143// ExtractGlimpseV2Centered sets the optional centered attribute to value.
12144//
12145// value: indicates if the offset coordinates are centered relative to
12146// the image, in which case the (0, 0) offset is relative to the center
12147// of the input images. If false, the (0,0) offset corresponds to the
12148// upper left corner of the input images.
12149// If not specified, defaults to true
12150func ExtractGlimpseV2Centered(value bool) ExtractGlimpseV2Attr {
12151	return func(m optionalAttr) {
12152		m["centered"] = value
12153	}
12154}
12155
12156// ExtractGlimpseV2Normalized sets the optional normalized attribute to value.
12157//
12158// value: indicates if the offset coordinates are normalized.
12159// If not specified, defaults to true
12160func ExtractGlimpseV2Normalized(value bool) ExtractGlimpseV2Attr {
12161	return func(m optionalAttr) {
12162		m["normalized"] = value
12163	}
12164}
12165
12166// ExtractGlimpseV2UniformNoise sets the optional uniform_noise attribute to value.
12167//
12168// value: indicates if the noise should be generated using a
12169// uniform distribution or a Gaussian distribution.
12170// If not specified, defaults to true
12171func ExtractGlimpseV2UniformNoise(value bool) ExtractGlimpseV2Attr {
12172	return func(m optionalAttr) {
12173		m["uniform_noise"] = value
12174	}
12175}
12176
12177// ExtractGlimpseV2Noise sets the optional noise attribute to value.
12178//
12179// value: indicates if the noise should be `uniform`, `gaussian`, or
12180// `zero`. The default is `uniform`, which means the noise type
12181// will be decided by `uniform_noise`.
12182// If not specified, defaults to "uniform"
12183func ExtractGlimpseV2Noise(value string) ExtractGlimpseV2Attr {
12184	return func(m optionalAttr) {
12185		m["noise"] = value
12186	}
12187}
12188
12189// Extracts a glimpse from the input tensor.
12190//
12191// Returns a set of windows called glimpses extracted at location
12192// `offsets` from the input tensor. If the windows only partially
12193// overlap the inputs, the non-overlapping areas will be filled with
12194// random noise.
12195//
12196// The result is a 4-D tensor of shape `[batch_size, glimpse_height,
12197// glimpse_width, channels]`. The channels and batch dimensions are the
12198// same as that of the input tensor. The height and width of the output
12199// windows are specified in the `size` parameter.
12200//
12201// The arguments `normalized` and `centered` control how the windows are built:
12202//
12203// * If the coordinates are normalized but not centered, 0.0 and 1.0
12204//   correspond to the minimum and maximum of each height and width
12205//   dimension.
12206// * If the coordinates are both normalized and centered, they range from
12207//   -1.0 to 1.0. The coordinates (-1.0, -1.0) correspond to the upper
12208//   left corner, the lower right corner is located at (1.0, 1.0) and the
12209//   center is at (0, 0).
12210// * If the coordinates are not normalized they are interpreted as
12211//   numbers of pixels.
12212//
12213// Arguments:
12214//	input: A 4-D float tensor of shape `[batch_size, height, width, channels]`.
12215//	size: A 1-D tensor of 2 elements containing the size of the glimpses
12216// to extract.  The glimpse height must be specified first, followed
12217// by the glimpse width.
12218//	offsets: A 2-D integer tensor of shape `[batch_size, 2]` containing
12219// the y, x locations of the center of each window.
12220//
12221// Returns A tensor representing the glimpses `[batch_size,
12222// glimpse_height, glimpse_width, channels]`.
12223func ExtractGlimpseV2(scope *Scope, input tf.Output, size tf.Output, offsets tf.Output, optional ...ExtractGlimpseV2Attr) (glimpse tf.Output) {
12224	if scope.Err() != nil {
12225		return
12226	}
12227	attrs := map[string]interface{}{}
12228	for _, a := range optional {
12229		a(attrs)
12230	}
12231	opspec := tf.OpSpec{
12232		Type: "ExtractGlimpseV2",
12233		Input: []tf.Input{
12234			input, size, offsets,
12235		},
12236		Attrs: attrs,
12237	}
12238	op := scope.AddOperation(opspec)
12239	return op.Output(0)
12240}
12241
12242// ExtractGlimpseAttr is an optional argument to ExtractGlimpse.
12243type ExtractGlimpseAttr func(optionalAttr)
12244
12245// ExtractGlimpseCentered sets the optional centered attribute to value.
12246//
12247// value: indicates if the offset coordinates are centered relative to
12248// the image, in which case the (0, 0) offset is relative to the center
12249// of the input images. If false, the (0,0) offset corresponds to the
12250// upper left corner of the input images.
12251// If not specified, defaults to true
12252func ExtractGlimpseCentered(value bool) ExtractGlimpseAttr {
12253	return func(m optionalAttr) {
12254		m["centered"] = value
12255	}
12256}
12257
12258// ExtractGlimpseNormalized sets the optional normalized attribute to value.
12259//
12260// value: indicates if the offset coordinates are normalized.
12261// If not specified, defaults to true
12262func ExtractGlimpseNormalized(value bool) ExtractGlimpseAttr {
12263	return func(m optionalAttr) {
12264		m["normalized"] = value
12265	}
12266}
12267
12268// ExtractGlimpseUniformNoise sets the optional uniform_noise attribute to value.
12269//
12270// value: indicates if the noise should be generated using a
12271// uniform distribution or a Gaussian distribution.
12272// If not specified, defaults to true
12273func ExtractGlimpseUniformNoise(value bool) ExtractGlimpseAttr {
12274	return func(m optionalAttr) {
12275		m["uniform_noise"] = value
12276	}
12277}
12278
12279// ExtractGlimpseNoise sets the optional noise attribute to value.
12280//
12281// value: indicates if the noise should be `uniform`, `gaussian`, or
12282// `zero`. The default is `uniform`, which means the noise type
12283// will be decided by `uniform_noise`.
12284// If not specified, defaults to "uniform"
12285func ExtractGlimpseNoise(value string) ExtractGlimpseAttr {
12286	return func(m optionalAttr) {
12287		m["noise"] = value
12288	}
12289}
12290
12291// Extracts a glimpse from the input tensor.
12292//
12293// Returns a set of windows called glimpses extracted at location
12294// `offsets` from the input tensor. If the windows only partially
12295// overlap the inputs, the non-overlapping areas will be filled with
12296// random noise.
12297//
12298// The result is a 4-D tensor of shape `[batch_size, glimpse_height,
12299// glimpse_width, channels]`. The channels and batch dimensions are the
12300// same as that of the input tensor. The height and width of the output
12301// windows are specified in the `size` parameter.
12302//
12303// The arguments `normalized` and `centered` control how the windows are built:
12304//
12305// * If the coordinates are normalized but not centered, 0.0 and 1.0
12306//   correspond to the minimum and maximum of each height and width
12307//   dimension.
12308// * If the coordinates are both normalized and centered, they range from
12309//   -1.0 to 1.0. The coordinates (-1.0, -1.0) correspond to the upper
12310//   left corner, the lower right corner is located at (1.0, 1.0) and the
12311//   center is at (0, 0).
12312// * If the coordinates are not normalized they are interpreted as
12313//   numbers of pixels.
12314//
12315// Arguments:
12316//	input: A 4-D float tensor of shape `[batch_size, height, width, channels]`.
12317//	size: A 1-D tensor of 2 elements containing the size of the glimpses
12318// to extract.  The glimpse height must be specified first, followed
12319// by the glimpse width.
12320//	offsets: A 2-D integer tensor of shape `[batch_size, 2]` containing
12321// the y, x locations of the center of each window.
12322//
12323// Returns A tensor representing the glimpses `[batch_size,
12324// glimpse_height, glimpse_width, channels]`.
12325func ExtractGlimpse(scope *Scope, input tf.Output, size tf.Output, offsets tf.Output, optional ...ExtractGlimpseAttr) (glimpse tf.Output) {
12326	if scope.Err() != nil {
12327		return
12328	}
12329	attrs := map[string]interface{}{}
12330	for _, a := range optional {
12331		a(attrs)
12332	}
12333	opspec := tf.OpSpec{
12334		Type: "ExtractGlimpse",
12335		Input: []tf.Input{
12336			input, size, offsets,
12337		},
12338		Attrs: attrs,
12339	}
12340	op := scope.AddOperation(opspec)
12341	return op.Output(0)
12342}
12343
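// The following is a hand-written sketch, not generated code. It assumes the
// `RandomUniform` wrapper from this package; the batch shape and glimpse size
// are illustrative.
//
// ```go
// s := op.NewScope()
// img := op.RandomUniform(s.SubScope("img"),
// 	op.Const(s.SubScope("shape"), []int32{1, 64, 64, 3}), tf.Float)
// size := op.Const(s.SubScope("size"), []int32{8, 8}) // height, then width
// offsets := op.Const(s.SubScope("offsets"), [][]float32{{0, 0}})
// // With centered+normalized coordinates, (0, 0) is the image center.
// glimpse := op.ExtractGlimpse(s, img, size, offsets,
// 	op.ExtractGlimpseCentered(true),
// 	op.ExtractGlimpseNormalized(true))
// _ = glimpse
// ```
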
12344// StatelessSampleDistortedBoundingBoxAttr is an optional argument to StatelessSampleDistortedBoundingBox.
12345type StatelessSampleDistortedBoundingBoxAttr func(optionalAttr)
12346
12347// StatelessSampleDistortedBoundingBoxAspectRatioRange sets the optional aspect_ratio_range attribute to value.
12348//
12349// value: The cropped area of the image must have an aspect ratio =
12350// width / height within this range.
12351// If not specified, defaults to [0.75, 1.33]
12352func StatelessSampleDistortedBoundingBoxAspectRatioRange(value []float32) StatelessSampleDistortedBoundingBoxAttr {
12353	return func(m optionalAttr) {
12354		m["aspect_ratio_range"] = value
12355	}
12356}
12357
12358// StatelessSampleDistortedBoundingBoxAreaRange sets the optional area_range attribute to value.
12359//
12360// value: The cropped area of the image must contain a fraction of the
12361// supplied image within this range.
12362// If not specified, defaults to [0.05, 1]
12363func StatelessSampleDistortedBoundingBoxAreaRange(value []float32) StatelessSampleDistortedBoundingBoxAttr {
12364	return func(m optionalAttr) {
12365		m["area_range"] = value
12366	}
12367}
12368
12369// StatelessSampleDistortedBoundingBoxMaxAttempts sets the optional max_attempts attribute to value.
12370//
12371// value: Number of attempts at generating a cropped region of the image
12372// of the specified constraints. After `max_attempts` failures, return the entire
12373// image.
12374// If not specified, defaults to 100
12375func StatelessSampleDistortedBoundingBoxMaxAttempts(value int64) StatelessSampleDistortedBoundingBoxAttr {
12376	return func(m optionalAttr) {
12377		m["max_attempts"] = value
12378	}
12379}
12380
12381// StatelessSampleDistortedBoundingBoxUseImageIfNoBoundingBoxes sets the optional use_image_if_no_bounding_boxes attribute to value.
12382//
12383// value: Controls behavior if no bounding boxes supplied.
12384// If true, assume an implicit bounding box covering the whole input. If false,
12385// raise an error.
12386// If not specified, defaults to false
12387func StatelessSampleDistortedBoundingBoxUseImageIfNoBoundingBoxes(value bool) StatelessSampleDistortedBoundingBoxAttr {
12388	return func(m optionalAttr) {
12389		m["use_image_if_no_bounding_boxes"] = value
12390	}
12391}
12392
12393// Generate a randomly distorted bounding box for an image deterministically.
12394//
12395// Bounding box annotations are often supplied in addition to ground-truth labels
12396// in image recognition or object localization tasks. A common technique for
12397// training such a system is to randomly distort an image while preserving its
12398// content, i.e. *data augmentation*. This Op, given the same `seed`,
12399// deterministically outputs a randomly distorted localization of an object, i.e.
12400// bounding box, given an `image_size`, `bounding_boxes` and a series of
12401// constraints.
12402//
12403// The output of this Op is a single bounding box that may be used to crop the
12404// original image. The output is returned as 3 tensors: `begin`, `size` and
12405// `bboxes`. The first 2 tensors can be fed directly into `tf.slice` to crop the
12406// image. The latter may be supplied to `tf.image.draw_bounding_boxes` to visualize
12407// what the bounding box looks like.
12408//
12409// Bounding boxes are supplied and returned as `[y_min, x_min, y_max, x_max]`. The
12410// bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and
12411// the height of the underlying image.
12412//
12413// The output of this Op is guaranteed to be the same given the same `seed` and is
12414// independent of how many times the function is called, and independent of global
12415// seed settings (e.g. `tf.random.set_seed`).
12416//
12417// Example usage:
12418//
12419// >>> image = np.array([[[1], [2], [3]], [[4], [5], [6]], [[7], [8], [9]]])
12420// >>> bbox = tf.constant(
12421// ...   [0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4])
12422// >>> seed = (1, 2)
12423// >>> # Generate a single distorted bounding box.
12424// >>> bbox_begin, bbox_size, bbox_draw = (
12425// ...   tf.image.stateless_sample_distorted_bounding_box(
12426// ...     tf.shape(image), bounding_boxes=bbox, seed=seed))
12427// >>> # Employ the bounding box to distort the image.
12428// >>> tf.slice(image, bbox_begin, bbox_size)
12429// <tf.Tensor: shape=(2, 2, 1), dtype=int64, numpy=
12430// array([[[1],
12431//         [2]],
12432//        [[4],
12433//         [5]]])>
12434// >>> # Draw the bounding box in an image summary.
12435// >>> colors = np.array([[1.0, 0.0, 0.0], [0.0, 0.0, 1.0]])
12436// >>> tf.image.draw_bounding_boxes(
12437// ...   tf.expand_dims(tf.cast(image, tf.float32),0), bbox_draw, colors)
12438// <tf.Tensor: shape=(1, 3, 3, 1), dtype=float32, numpy=
12439// array([[[[1.],
12440//          [1.],
12441//          [3.]],
12442//         [[1.],
12443//          [1.],
12444//          [6.]],
12445//         [[7.],
12446//          [8.],
12447//          [9.]]]], dtype=float32)>
12448//
12449// Note that if no bounding box information is available, setting
12450// `use_image_if_no_bounding_boxes = true` will assume there is a single implicit
12451// bounding box covering the whole image. If `use_image_if_no_bounding_boxes` is
12452// false and no bounding boxes are supplied, an error is raised.
12453//
12454// Arguments:
12455//	image_size: 1-D, containing `[height, width, channels]`.
12456//	bounding_boxes: 3-D with shape `[batch, N, 4]` describing the N bounding boxes
12457// associated with the image.
12458//	min_object_covered: The cropped area of the image must contain at least this
12459// fraction of any bounding box supplied. The value of this parameter should be
12460// non-negative. In the case of 0, the cropped area does not need to overlap
12461// any of the bounding boxes supplied.
12462//	seed: 1-D with shape `[2]`. The seed to the random number generator. Must have dtype
12463// `int32` or `int64`. (When using XLA, only `int32` is allowed.)
12464//
12465// Returns:
12466//	begin: 1-D, containing `[offset_height, offset_width, 0]`. Provide as input to
12467// `tf.slice`.
12468//	size: 1-D, containing `[target_height, target_width, -1]`. Provide as input to
12469// `tf.slice`.
12470//	bboxes: 3-D with shape `[1, 1, 4]` containing the distorted bounding box.
12471// Provide as input to `tf.image.draw_bounding_boxes`.
12472func StatelessSampleDistortedBoundingBox(scope *Scope, image_size tf.Output, bounding_boxes tf.Output, min_object_covered tf.Output, seed tf.Output, optional ...StatelessSampleDistortedBoundingBoxAttr) (begin tf.Output, size tf.Output, bboxes tf.Output) {
12473	if scope.Err() != nil {
12474		return
12475	}
12476	attrs := map[string]interface{}{}
12477	for _, a := range optional {
12478		a(attrs)
12479	}
12480	opspec := tf.OpSpec{
12481		Type: "StatelessSampleDistortedBoundingBox",
12482		Input: []tf.Input{
12483			image_size, bounding_boxes, min_object_covered, seed,
12484		},
12485		Attrs: attrs,
12486	}
12487	op := scope.AddOperation(opspec)
12488	return op.Output(0), op.Output(1), op.Output(2)
12489}
12490
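// The following is a hand-written Go sketch mirroring the Python example
// above, not generated code; shapes and values are illustrative.
//
// ```go
// s := op.NewScope()
// imageSize := op.Const(s.SubScope("image_size"), []int32{3, 3, 1})
// bboxes := op.Const(s.SubScope("bboxes"), [][][]float32{{{0, 0, 1, 1}}})
// minCovered := op.Const(s.SubScope("min_covered"), float32(0.1))
// seed := op.Const(s.SubScope("seed"), []int32{1, 2})
// begin, size, outBoxes := op.StatelessSampleDistortedBoundingBox(s,
// 	imageSize, bboxes, minCovered, seed,
// 	op.StatelessSampleDistortedBoundingBoxUseImageIfNoBoundingBoxes(true))
// // begin and size can feed a Slice op; outBoxes can feed DrawBoundingBoxesV2.
// _, _, _ = begin, size, outBoxes
// ```
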
12491// SampleDistortedBoundingBoxAttr is an optional argument to SampleDistortedBoundingBox.
12492type SampleDistortedBoundingBoxAttr func(optionalAttr)
12493
12494// SampleDistortedBoundingBoxSeed sets the optional seed attribute to value.
12495//
12496// value: If either `seed` or `seed2` is set to non-zero, the random number
12497// generator is seeded by the given `seed`.  Otherwise, it is seeded by a random
12498// seed.
12499// If not specified, defaults to 0
12500func SampleDistortedBoundingBoxSeed(value int64) SampleDistortedBoundingBoxAttr {
12501	return func(m optionalAttr) {
12502		m["seed"] = value
12503	}
12504}
12505
12506// SampleDistortedBoundingBoxSeed2 sets the optional seed2 attribute to value.
12507//
12508// value: A second seed to avoid seed collision.
12509// If not specified, defaults to 0
12510func SampleDistortedBoundingBoxSeed2(value int64) SampleDistortedBoundingBoxAttr {
12511	return func(m optionalAttr) {
12512		m["seed2"] = value
12513	}
12514}
12515
12516// SampleDistortedBoundingBoxMinObjectCovered sets the optional min_object_covered attribute to value.
12517//
12518// value: The cropped area of the image must contain at least this
12519// fraction of any bounding box supplied. The value of this parameter should be
12520// non-negative. In the case of 0, the cropped area does not need to overlap
12521// any of the bounding boxes supplied.
12522// If not specified, defaults to 0.1
12523func SampleDistortedBoundingBoxMinObjectCovered(value float32) SampleDistortedBoundingBoxAttr {
12524	return func(m optionalAttr) {
12525		m["min_object_covered"] = value
12526	}
12527}
12528
12529// SampleDistortedBoundingBoxAspectRatioRange sets the optional aspect_ratio_range attribute to value.
12530//
12531// value: The cropped area of the image must have an aspect ratio =
12532// width / height within this range.
12533// If not specified, defaults to [0.75, 1.33]
12534func SampleDistortedBoundingBoxAspectRatioRange(value []float32) SampleDistortedBoundingBoxAttr {
12535	return func(m optionalAttr) {
12536		m["aspect_ratio_range"] = value
12537	}
12538}
12539
12540// SampleDistortedBoundingBoxAreaRange sets the optional area_range attribute to value.
12541//
12542// value: The cropped area of the image must contain a fraction of the
12543// supplied image within this range.
12544// If not specified, defaults to [0.05, 1]
12545func SampleDistortedBoundingBoxAreaRange(value []float32) SampleDistortedBoundingBoxAttr {
12546	return func(m optionalAttr) {
12547		m["area_range"] = value
12548	}
12549}
12550
12551// SampleDistortedBoundingBoxMaxAttempts sets the optional max_attempts attribute to value.
12552//
12553// value: Number of attempts at generating a cropped region of the image
12554// of the specified constraints. After `max_attempts` failures, return the entire
12555// image.
12556// If not specified, defaults to 100
12557func SampleDistortedBoundingBoxMaxAttempts(value int64) SampleDistortedBoundingBoxAttr {
12558	return func(m optionalAttr) {
12559		m["max_attempts"] = value
12560	}
12561}
12562
12563// SampleDistortedBoundingBoxUseImageIfNoBoundingBoxes sets the optional use_image_if_no_bounding_boxes attribute to value.
12564//
12565// value: Controls behavior if no bounding boxes supplied.
12566// If true, assume an implicit bounding box covering the whole input. If false,
12567// raise an error.
12568// If not specified, defaults to false
12569func SampleDistortedBoundingBoxUseImageIfNoBoundingBoxes(value bool) SampleDistortedBoundingBoxAttr {
12570	return func(m optionalAttr) {
12571		m["use_image_if_no_bounding_boxes"] = value
12572	}
12573}
12574
12575// Generate a single randomly distorted bounding box for an image.
12576//
12577// Bounding box annotations are often supplied in addition to ground-truth labels
12578// in image recognition or object localization tasks. A common technique for
12579// training such a system is to randomly distort an image while preserving
12580// its content, i.e. *data augmentation*. This Op outputs a randomly distorted
12581// localization of an object, i.e. bounding box, given an `image_size`,
12582// `bounding_boxes` and a series of constraints.
12583//
12584// The output of this Op is a single bounding box that may be used to crop the
12585// original image. The output is returned as 3 tensors: `begin`, `size` and
12586// `bboxes`. The first 2 tensors can be fed directly into `tf.slice` to crop the
12587// image. The latter may be supplied to `tf.image.draw_bounding_boxes` to visualize
12588// what the bounding box looks like.
12589//
12590// Bounding boxes are supplied and returned as `[y_min, x_min, y_max, x_max]`. The
12591// bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and
12592// height of the underlying image.
12593//
12594// For example,
12595//
12596// ```python
12597//     # Generate a single distorted bounding box.
12598//     begin, size, bbox_for_draw = tf.image.sample_distorted_bounding_box(
12599//         tf.shape(image),
12600//         bounding_boxes=bounding_boxes)
12601//
12602//     # Draw the bounding box in an image summary.
12603//     image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0),
12604//                                                   bbox_for_draw)
12605//     tf.summary.image('images_with_box', image_with_box)
12606//
12607//     # Employ the bounding box to distort the image.
12608//     distorted_image = tf.slice(image, begin, size)
12609// ```
12610//
12611// Note that if no bounding box information is available, setting
12612// `use_image_if_no_bounding_boxes = true` will assume there is a single implicit
12613// bounding box covering the whole image. If `use_image_if_no_bounding_boxes` is
12614// false and no bounding boxes are supplied, an error is raised.
12615//
12616// Arguments:
12617//	image_size: 1-D, containing `[height, width, channels]`.
12618//	bounding_boxes: 3-D with shape `[batch, N, 4]` describing the N bounding boxes
12619// associated with the image.
12620//
12621// Returns:
12622//	begin: 1-D, containing `[offset_height, offset_width, 0]`. Provide as input to
12623// `tf.slice`.
12624//	size: 1-D, containing `[target_height, target_width, -1]`. Provide as input to
12625// `tf.slice`.
12626//	bboxes: 3-D with shape `[1, 1, 4]` containing the distorted bounding box.
12627// Provide as input to `tf.image.draw_bounding_boxes`.
12628func SampleDistortedBoundingBox(scope *Scope, image_size tf.Output, bounding_boxes tf.Output, optional ...SampleDistortedBoundingBoxAttr) (begin tf.Output, size tf.Output, bboxes tf.Output) {
12629	if scope.Err() != nil {
12630		return
12631	}
12632	attrs := map[string]interface{}{}
12633	for _, a := range optional {
12634		a(attrs)
12635	}
12636	opspec := tf.OpSpec{
12637		Type: "SampleDistortedBoundingBox",
12638		Input: []tf.Input{
12639			image_size, bounding_boxes,
12640		},
12641		Attrs: attrs,
12642	}
12643	op := scope.AddOperation(opspec)
12644	return op.Output(0), op.Output(1), op.Output(2)
12645}
12646
12647// Draw bounding boxes on a batch of images.
12648//
12649// Outputs a copy of `images` but draws on top of the pixels zero or more bounding
12650// boxes specified by the locations in `boxes`. The coordinates of each
12651// bounding box in `boxes` are encoded as `[y_min, x_min, y_max, x_max]`. The
12652// bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and
12653// height of the underlying image.
12654//
12655// For example, if an image is 100 x 200 pixels (height x width) and the bounding
12656// box is `[0.1, 0.2, 0.5, 0.9]`, the upper-left and bottom-right coordinates of
12657// the bounding box will be `(40, 10)` to `(180, 50)` (in (x,y) coordinates).
12658//
12659// Parts of the bounding box may fall outside the image.
12660//
12661// Arguments:
12662//	images: 4-D with shape `[batch, height, width, depth]`. A batch of images.
12663//	boxes: 3-D with shape `[batch, num_bounding_boxes, 4]` containing bounding
12664// boxes.
12665//	colors: 2-D. A list of RGBA colors to cycle through for the boxes.
12666//
12667// Returns 4-D with the same shape as `images`. The batch of input images with
12668// bounding boxes drawn on the images.
12669func DrawBoundingBoxesV2(scope *Scope, images tf.Output, boxes tf.Output, colors tf.Output) (output tf.Output) {
12670	if scope.Err() != nil {
12671		return
12672	}
12673	opspec := tf.OpSpec{
12674		Type: "DrawBoundingBoxesV2",
12675		Input: []tf.Input{
12676			images, boxes, colors,
12677		},
12678	}
12679	op := scope.AddOperation(opspec)
12680	return op.Output(0)
12681}
12682
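// The following is a hand-written sketch, not generated code; it draws one
// red box on a single random image, matching the coordinate example above.
// It assumes the `RandomUniform` wrapper from this package.
//
// ```go
// s := op.NewScope()
// images := op.RandomUniform(s.SubScope("images"),
// 	op.Const(s.SubScope("shape"), []int32{1, 100, 200, 3}), tf.Float)
// boxes := op.Const(s.SubScope("boxes"),
// 	[][][]float32{{{0.1, 0.2, 0.5, 0.9}}}) // [y_min, x_min, y_max, x_max]
// colors := op.Const(s.SubScope("colors"), [][]float32{{1, 0, 0, 1}}) // RGBA red
// withBoxes := op.DrawBoundingBoxesV2(s, images, boxes, colors)
// _ = withBoxes
// ```
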
12683// Draw bounding boxes on a batch of images.
12684//
12685// Outputs a copy of `images` but draws on top of the pixels zero or more bounding
12686// boxes specified by the locations in `boxes`. The coordinates of each
12687// bounding box in `boxes` are encoded as `[y_min, x_min, y_max, x_max]`. The
12688// bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and
12689// height of the underlying image.
12690//
12691// For example, if an image is 100 x 200 pixels (height x width) and the bounding
12692// box is `[0.1, 0.2, 0.5, 0.9]`, the upper-left and bottom-right coordinates of
12693// the bounding box will be `(40, 10)` to `(180, 50)` (in (x,y) coordinates).
12694//
12695// Parts of the bounding box may fall outside the image.
12696//
12697// Arguments:
12698//	images: 4-D with shape `[batch, height, width, depth]`. A batch of images.
12699//	boxes: 3-D with shape `[batch, num_bounding_boxes, 4]` containing bounding
12700// boxes.
12701//
12702// Returns 4-D with the same shape as `images`. The batch of input images with
12703// bounding boxes drawn on the images.
12704func DrawBoundingBoxes(scope *Scope, images tf.Output, boxes tf.Output) (output tf.Output) {
12705	if scope.Err() != nil {
12706		return
12707	}
12708	opspec := tf.OpSpec{
12709		Type: "DrawBoundingBoxes",
12710		Input: []tf.Input{
12711			images, boxes,
12712		},
12713	}
12714	op := scope.AddOperation(opspec)
12715	return op.Output(0)
12716}
12717
12718// Convert one or more images from HSV to RGB.
12719//
12720// Outputs a tensor of the same shape as the `images` tensor, containing the RGB
12721// value of the pixels. The output is only well defined if the values in `images`
12722// are in `[0,1]`.
12723//
12724// See `rgb_to_hsv` for a description of the HSV encoding.
12725//
12726// Arguments:
12727//	images: 1-D or higher rank. HSV data to convert. Last dimension must be size 3.
12728//
12729// Returns `images` converted to RGB.
12730func HSVToRGB(scope *Scope, images tf.Output) (output tf.Output) {
12731	if scope.Err() != nil {
12732		return
12733	}
12734	opspec := tf.OpSpec{
12735		Type: "HSVToRGB",
12736		Input: []tf.Input{
12737			images,
12738		},
12739	}
12740	op := scope.AddOperation(opspec)
12741	return op.Output(0)
12742}
12743
12744// Converts one or more images from RGB to HSV.
12745//
12746// Outputs a tensor of the same shape as the `images` tensor, containing the HSV
12747// value of the pixels. The output is only well defined if the values in `images`
12748// are in `[0,1]`.
12749//
12750// `output[..., 0]` contains hue, `output[..., 1]` contains saturation, and
12751// `output[..., 2]` contains value. All HSV values are in `[0,1]`. A hue of 0
12752// corresponds to pure red, hue 1/3 is pure green, and 2/3 is pure blue.
12753//
12754// Usage Example:
12755//
12756// >>> blue_image = tf.stack([
12757// ...    tf.zeros([5,5]),
12758// ...    tf.zeros([5,5]),
12759// ...    tf.ones([5,5])],
12760// ...    axis=-1)
12761// >>> blue_hsv_image = tf.image.rgb_to_hsv(blue_image)
12762// >>> blue_hsv_image[0,0].numpy()
12763// array([0.6666667, 1. , 1. ], dtype=float32)
12764//
12765//
12766// Arguments:
12767//	images: 1-D or higher rank. RGB data to convert. Last dimension must be size 3.
12768//
12769// Returns `images` converted to HSV.
12770func RGBToHSV(scope *Scope, images tf.Output) (output tf.Output) {
12771	if scope.Err() != nil {
12772		return
12773	}
12774	opspec := tf.OpSpec{
12775		Type: "RGBToHSV",
12776		Input: []tf.Input{
12777			images,
12778		},
12779	}
12780	op := scope.AddOperation(opspec)
12781	return op.Output(0)
12782}
12783
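// The following is a hand-written round-trip sketch, not generated code; the
// pure-blue pixel follows the usage example above.
//
// ```go
// s := op.NewScope()
// blue := op.Const(s.SubScope("blue"), [][][]float32{{{0, 0, 1}}}) // 1x1 RGB
// hsv := op.RGBToHSV(s, blue) // expect approximately [2/3, 1, 1]
// rgb := op.HSVToRGB(s, hsv)  // expect the original [0, 0, 1]
// _ = rgb
// ```
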
12784// Decode the frame(s) of a GIF-encoded image to a uint8 tensor.
12785//
12786// GIF images with frame or transparency compression are not supported.
12787// On Linux and macOS systems, convert animated GIFs from compressed to
12788// uncompressed by running:
12789//
12790//     convert $src.gif -coalesce $dst.gif
12791//
12792// This op also supports decoding JPEGs and PNGs, though it is cleaner to use
12793// `tf.io.decode_image`.
12794//
12795// Arguments:
12796//	contents: 0-D.  The GIF-encoded image.
12797//
12798// Returns 4-D with shape `[num_frames, height, width, 3]`. RGB channel order.
12799func DecodeGif(scope *Scope, contents tf.Output) (image tf.Output) {
12800	if scope.Err() != nil {
12801		return
12802	}
12803	opspec := tf.OpSpec{
12804		Type: "DecodeGif",
12805		Input: []tf.Input{
12806			contents,
12807		},
12808	}
12809	op := scope.AddOperation(opspec)
12810	return op.Output(0)
12811}
12812
12813// DecodeBmpAttr is an optional argument to DecodeBmp.
12814type DecodeBmpAttr func(optionalAttr)
12815
12816// DecodeBmpChannels sets the optional channels attribute to value.
12817// If not specified, defaults to 0
12818func DecodeBmpChannels(value int64) DecodeBmpAttr {
12819	return func(m optionalAttr) {
12820		m["channels"] = value
12821	}
12822}
12823
12824// Decode the first frame of a BMP-encoded image to a uint8 tensor.
12825//
12826// The attr `channels` indicates the desired number of color channels for the
12827// decoded image.
12828//
12829// Accepted values are:
12830//
12831// *   0: Use the number of channels in the BMP-encoded image.
12832// *   3: output an RGB image.
12833// *   4: output an RGBA image.
12834//
12835// Arguments:
12836//	contents: 0-D.  The BMP-encoded image.
12837//
12838// Returns 3-D with shape `[height, width, channels]`. RGB order
12839func DecodeBmp(scope *Scope, contents tf.Output, optional ...DecodeBmpAttr) (image tf.Output) {
12840	if scope.Err() != nil {
12841		return
12842	}
12843	attrs := map[string]interface{}{}
12844	for _, a := range optional {
12845		a(attrs)
12846	}
12847	opspec := tf.OpSpec{
12848		Type: "DecodeBmp",
12849		Input: []tf.Input{
12850			contents,
12851		},
12852		Attrs: attrs,
12853	}
12854	op := scope.AddOperation(opspec)
12855	return op.Output(0)
12856}
12857
12858// EncodePngAttr is an optional argument to EncodePng.
12859type EncodePngAttr func(optionalAttr)
12860
12861// EncodePngCompression sets the optional compression attribute to value.
12862//
12863// value: Compression level.
12864// If not specified, defaults to -1
12865func EncodePngCompression(value int64) EncodePngAttr {
12866	return func(m optionalAttr) {
12867		m["compression"] = value
12868	}
12869}
12870
12871// PNG-encode an image.
12872//
12873// `image` is a 3-D uint8 or uint16 Tensor of shape `[height, width, channels]`
12874// where `channels` is:
12875//
12876// *   1: for grayscale.
12877// *   2: for grayscale + alpha.
12878// *   3: for RGB.
12879// *   4: for RGBA.
12880//
12881// The ZLIB compression level, `compression`, can be -1 for the PNG-encoder
12882// default or a value from 0 to 9.  9 is the highest compression level, generating
12883// the smallest output, but is slower.
12884//
12885// Arguments:
12886//	image: 3-D with shape `[height, width, channels]`.
12887//
12888// Returns 0-D. PNG-encoded image.
12889func EncodePng(scope *Scope, image tf.Output, optional ...EncodePngAttr) (contents tf.Output) {
12890	if scope.Err() != nil {
12891		return
12892	}
12893	attrs := map[string]interface{}{}
12894	for _, a := range optional {
12895		a(attrs)
12896	}
12897	opspec := tf.OpSpec{
12898		Type: "EncodePng",
12899		Input: []tf.Input{
12900			image,
12901		},
12902		Attrs: attrs,
12903	}
12904	op := scope.AddOperation(opspec)
12905	return op.Output(0)
12906}
12907
12908// Invert (flip) each bit of supported types; for example, type `uint8` value 01010101 becomes 10101010.
12909//
12910// Flip each bit of supported types.  For example, type `int8` (decimal 2) binary 00000010 becomes (decimal -3) binary 11111101.
12911// This operation is performed on each element of the tensor argument `x`.
12912//
12913// Example:
12914// ```python
12915// import tensorflow as tf
12916// from tensorflow.python.ops import bitwise_ops
12917//
12918// # flip 2 (00000010) to -3 (11111101)
12919// tf.assert_equal(-3, bitwise_ops.invert(2))
12920//
12921// dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64,
12922//               tf.uint8, tf.uint16, tf.uint32, tf.uint64]
12923//
12924// inputs = [0, 5, 3, 14]
12925// for dtype in dtype_list:
12926//   # Because of issues with negative numbers, let's test this indirectly.
12927//   # 1. invert(a) and a = 0
12928//   # 2. invert(a) or a = invert(0)
12929//   input_tensor = tf.constant([0, 5, 3, 14], dtype=dtype)
12930//   not_a_and_a, not_a_or_a, not_0 = [bitwise_ops.bitwise_and(
12931//                                       input_tensor, bitwise_ops.invert(input_tensor)),
12932//                                     bitwise_ops.bitwise_or(
12933//                                       input_tensor, bitwise_ops.invert(input_tensor)),
12934//                                     bitwise_ops.invert(
12935//                                       tf.constant(0, dtype=dtype))]
12936//
12937//   expected = tf.constant([0, 0, 0, 0], dtype=tf.float32)
12938//   tf.assert_equal(tf.cast(not_a_and_a, tf.float32), expected)
12939//
12940//   expected = tf.cast([not_0] * 4, tf.float32)
12941//   tf.assert_equal(tf.cast(not_a_or_a, tf.float32), expected)
12942//
12943//   # For unsigned dtypes let's also check the result directly.
12944//   if dtype.is_unsigned:
12945//     inverted = bitwise_ops.invert(input_tensor)
12946//     expected = tf.constant([dtype.max - x for x in inputs], dtype=tf.float32)
12947//     tf.assert_equal(tf.cast(inverted, tf.float32), tf.cast(expected, tf.float32))
12948// ```
12949func Invert(scope *Scope, x tf.Output) (y tf.Output) {
12950	if scope.Err() != nil {
12951		return
12952	}
12953	opspec := tf.OpSpec{
12954		Type: "Invert",
12955		Input: []tf.Input{
12956			x,
12957		},
12958	}
12959	op := scope.AddOperation(opspec)
12960	return op.Output(0)
12961}
12962
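// The following is a hand-written Go sketch of the same identities, not
// generated code; it assumes the `BitwiseAnd` and `BitwiseOr` wrappers from
// this package.
//
// ```go
// s := op.NewScope()
// x := op.Const(s.SubScope("x"), []uint8{0, 5, 3, 14})
// inv := op.Invert(s, x) // for uint8, each element becomes 255 - x
// zeros := op.BitwiseAnd(s.SubScope("and"), x, inv) // x AND NOT x == 0
// ones := op.BitwiseOr(s.SubScope("or"), x, inv)    // x OR NOT x == 255
// _, _ = zeros, ones
// ```
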
12963// DecodePngAttr is an optional argument to DecodePng.
12964type DecodePngAttr func(optionalAttr)
12965
12966// DecodePngChannels sets the optional channels attribute to value.
12967//
12968// value: Number of color channels for the decoded image.
12969// If not specified, defaults to 0
12970func DecodePngChannels(value int64) DecodePngAttr {
12971	return func(m optionalAttr) {
12972		m["channels"] = value
12973	}
12974}
12975
12976// DecodePngDtype sets the optional dtype attribute to value.
12977// If not specified, defaults to DT_UINT8
12978func DecodePngDtype(value tf.DataType) DecodePngAttr {
12979	return func(m optionalAttr) {
12980		m["dtype"] = value
12981	}
12982}
12983
12984// Decode a PNG-encoded image to a uint8 or uint16 tensor.
12985//
12986// The attr `channels` indicates the desired number of color channels for the
12987// decoded image.
12988//
12989// Accepted values are:
12990//
12991// *   0: Use the number of channels in the PNG-encoded image.
12992// *   1: output a grayscale image.
12993// *   3: output an RGB image.
12994// *   4: output an RGBA image.
12995//
12996// If needed, the PNG-encoded image is transformed to match the requested number
12997// of color channels.
12998//
12999// This op also supports decoding JPEGs and non-animated GIFs since the interface
13000// is the same, though it is cleaner to use `tf.io.decode_image`.
13001//
13002// Arguments:
13003//	contents: 0-D.  The PNG-encoded image.
13004//
13005// Returns 3-D with shape `[height, width, channels]`.
13006func DecodePng(scope *Scope, contents tf.Output, optional ...DecodePngAttr) (image tf.Output) {
13007	if scope.Err() != nil {
13008		return
13009	}
13010	attrs := map[string]interface{}{}
13011	for _, a := range optional {
13012		a(attrs)
13013	}
13014	opspec := tf.OpSpec{
13015		Type: "DecodePng",
13016		Input: []tf.Input{
13017			contents,
13018		},
13019		Attrs: attrs,
13020	}
13021	op := scope.AddOperation(opspec)
13022	return op.Output(0)
13023}
13024
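// The following is a hand-written decode/re-encode sketch, not generated
// code; the PNG bytes are fed at run time through a placeholder.
//
// ```go
// s := op.NewScope()
// contents := op.Placeholder(s.SubScope("contents"), tf.String)
// img := op.DecodePng(s, contents,
// 	op.DecodePngChannels(3), // force RGB
// 	op.DecodePngDtype(tf.Uint8))
// reencoded := op.EncodePng(s, img,
// 	op.EncodePngCompression(9)) // smallest output, slowest
// _ = reencoded
// ```
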
13025// Adjust the saturation of one or more images.
13026//
13027// `images` is a tensor of at least 3 dimensions.  The last dimension is
13028// interpreted as channels, and must be three.
13029//
13030// The input image is considered in the RGB colorspace. Conceptually, the RGB
13031// colors are first mapped into HSV. A scale is then applied to all the saturation
13032// values, which are then remapped back to the RGB colorspace.
13033//
13034// Arguments:
13035//	images: Images to adjust.  At least 3-D.
13036//	scale: A float scale to add to the saturation.
13037//
13038// Returns The saturation-adjusted image or images.
13039func AdjustSaturation(scope *Scope, images tf.Output, scale tf.Output) (output tf.Output) {
13040	if scope.Err() != nil {
13041		return
13042	}
13043	opspec := tf.OpSpec{
13044		Type: "AdjustSaturation",
13045		Input: []tf.Input{
13046			images, scale,
13047		},
13048	}
13049	op := scope.AddOperation(opspec)
13050	return op.Output(0)
13051}
13052
13053// Adjust the contrast of one or more images.
13054//
13055// `images` is a tensor of at least 3 dimensions.  The last 3 dimensions are
13056// interpreted as `[height, width, channels]`.  The other dimensions only
13057// represent a collection of images, such as `[batch, height, width, channels].`
13058//
13059// Contrast is adjusted independently for each channel of each image.
13060//
13061// For each channel, the Op first computes the mean of the image pixels in the
13062// channel and then adjusts each component of each pixel to
13063// `(x - mean) * contrast_factor + mean`.
13064//
13065// Arguments:
13066//	images: Images to adjust.  At least 3-D.
13067//	contrast_factor: A float multiplier for adjusting contrast.
13068//
13069// Returns The contrast-adjusted image or images.
13070func AdjustContrastv2(scope *Scope, images tf.Output, contrast_factor tf.Output) (output tf.Output) {
13071	if scope.Err() != nil {
13072		return
13073	}
13074	opspec := tf.OpSpec{
13075		Type: "AdjustContrastv2",
13076		Input: []tf.Input{
13077			images, contrast_factor,
13078		},
13079	}
13080	op := scope.AddOperation(opspec)
13081	return op.Output(0)
13082}
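
// Example (editor's note): a hedged sketch chaining the two adjustment ops
// above. It assumes the generated `Placeholder` wrapper and the `Const`
// helper from this package; a float32 image batch is fed at run time.
//
// ```
// s := op.NewScope()
// images := op.Placeholder(s, tf.Float) // [batch, height, width, 3]
// sat := op.AdjustSaturation(s, images, op.Const(s, float32(1.5)))
// out := op.AdjustContrastv2(s, sat, op.Const(s, float32(0.8)))
// ```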

// Initializes the multi device iterator with the given dataset.
//
// Arguments:
//	dataset: Dataset to be iterated upon.
//	multi_device_iterator: A MultiDeviceIteratorResource.
//	max_buffer_size: The maximum size of the host-side per-device buffer to keep.
//
// Returns An int64 indicating which incarnation of the MultiDeviceIterator
// is running.
func MultiDeviceIteratorInit(scope *Scope, dataset tf.Output, multi_device_iterator tf.Output, max_buffer_size tf.Output) (incarnation_id tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "MultiDeviceIteratorInit",
		Input: []tf.Input{
			dataset, multi_device_iterator, max_buffer_size,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Deprecated. Disallowed in GraphDef version >= 2.
//
// DEPRECATED at GraphDef version 2: Use AdjustContrastv2 instead
func AdjustContrast(scope *Scope, images tf.Output, contrast_factor tf.Output, min_value tf.Output, max_value tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "AdjustContrast",
		Input: []tf.Input{
			images, contrast_factor, min_value, max_value,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// ExtractJpegShapeAttr is an optional argument to ExtractJpegShape.
type ExtractJpegShapeAttr func(optionalAttr)

// ExtractJpegShapeOutputType sets the optional output_type attribute to value.
//
// value: (Optional) The output type of the operation (int32 or int64).
// Defaults to int32.
// If not specified, defaults to DT_INT32
func ExtractJpegShapeOutputType(value tf.DataType) ExtractJpegShapeAttr {
	return func(m optionalAttr) {
		m["output_type"] = value
	}
}

// Extract the shape information of a JPEG-encoded image.
//
// This op only parses the image header, so it is much faster than DecodeJpeg.
//
// Arguments:
//	contents: 0-D. The JPEG-encoded image.
//
// Returns 1-D. The image shape with format [height, width, channels].
func ExtractJpegShape(scope *Scope, contents tf.Output, optional ...ExtractJpegShapeAttr) (image_shape tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ExtractJpegShape",
		Input: []tf.Input{
			contents,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
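
// Example (editor's note): a minimal sketch of reading a JPEG header without
// a full decode, assuming the generated `ReadFile` wrapper and the `Const`
// helper from this package; the file name is illustrative.
//
// ```
// s := op.NewScope()
// contents := op.ReadFile(s, op.Const(s, "photo.jpg"))
// shape := op.ExtractJpegShape(s, contents,
// 	op.ExtractJpegShapeOutputType(tf.Int64)) // [height, width, channels]
// ```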

// JPEG encode input image with provided compression quality.
//
// `image` is a 3-D uint8 Tensor of shape `[height, width, channels]`.
// `quality` is an int32 jpeg compression quality value between 0 and 100.
//
// Arguments:
//	images: A 3-D uint8 Tensor of shape `[height, width, channels]`.
//	quality: An int32 quality to encode to.
//
// Returns 0-D. JPEG-encoded image.
func EncodeJpegVariableQuality(scope *Scope, images tf.Output, quality tf.Output) (contents tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "EncodeJpegVariableQuality",
		Input: []tf.Input{
			images, quality,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Returns the rank of a tensor.
//
// This operation returns an integer representing the rank of `input`.
//
// For example:
//
// ```
// # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
// # shape of tensor 't' is [2, 2, 3]
// rank(t) ==> 3
// ```
//
// **Note**: The rank of a tensor is not the same as the rank of a matrix. The rank
// of a tensor is the number of indices required to uniquely select each element
// of the tensor. Rank is also known as "order", "degree", or "ndims."
func Rank(scope *Scope, input tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Rank",
		Input: []tf.Input{
			input,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
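
// Example (editor's note): a small sketch of Rank, assuming the `Const`
// helper from this package. Fetching `r` in a session yields the scalar 2.
//
// ```
// s := op.NewScope()
// t := op.Const(s, [][]float32{{1, 2}, {3, 4}}) // shape [2, 2]
// r := op.Rank(s, t)                            // 0-D int32 tensor: 2
// ```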

// EncodeJpegAttr is an optional argument to EncodeJpeg.
type EncodeJpegAttr func(optionalAttr)

// EncodeJpegFormat sets the optional format attribute to value.
//
// value: Per pixel image format.
// If not specified, defaults to ""
func EncodeJpegFormat(value string) EncodeJpegAttr {
	return func(m optionalAttr) {
		m["format"] = value
	}
}

// EncodeJpegQuality sets the optional quality attribute to value.
//
// value: Quality of the compression from 0 to 100 (higher is better and slower).
// If not specified, defaults to 95
func EncodeJpegQuality(value int64) EncodeJpegAttr {
	return func(m optionalAttr) {
		m["quality"] = value
	}
}

// EncodeJpegProgressive sets the optional progressive attribute to value.
//
// value: If True, create a JPEG that loads progressively (coarse to fine).
// If not specified, defaults to false
func EncodeJpegProgressive(value bool) EncodeJpegAttr {
	return func(m optionalAttr) {
		m["progressive"] = value
	}
}

// EncodeJpegOptimizeSize sets the optional optimize_size attribute to value.
//
// value: If True, spend CPU/RAM to reduce size with no quality change.
// If not specified, defaults to false
func EncodeJpegOptimizeSize(value bool) EncodeJpegAttr {
	return func(m optionalAttr) {
		m["optimize_size"] = value
	}
}

// EncodeJpegChromaDownsampling sets the optional chroma_downsampling attribute to value.
//
// value: See http://en.wikipedia.org/wiki/Chroma_subsampling.
// If not specified, defaults to true
func EncodeJpegChromaDownsampling(value bool) EncodeJpegAttr {
	return func(m optionalAttr) {
		m["chroma_downsampling"] = value
	}
}

// EncodeJpegDensityUnit sets the optional density_unit attribute to value.
//
// value: Unit used to specify `x_density` and `y_density`:
// pixels per inch (`'in'`) or centimeter (`'cm'`).
// If not specified, defaults to "in"
func EncodeJpegDensityUnit(value string) EncodeJpegAttr {
	return func(m optionalAttr) {
		m["density_unit"] = value
	}
}

// EncodeJpegXDensity sets the optional x_density attribute to value.
//
// value: Horizontal pixels per density unit.
// If not specified, defaults to 300
func EncodeJpegXDensity(value int64) EncodeJpegAttr {
	return func(m optionalAttr) {
		m["x_density"] = value
	}
}

// EncodeJpegYDensity sets the optional y_density attribute to value.
//
// value: Vertical pixels per density unit.
// If not specified, defaults to 300
func EncodeJpegYDensity(value int64) EncodeJpegAttr {
	return func(m optionalAttr) {
		m["y_density"] = value
	}
}

// EncodeJpegXmpMetadata sets the optional xmp_metadata attribute to value.
//
// value: If not empty, embed this XMP metadata in the image header.
// If not specified, defaults to ""
func EncodeJpegXmpMetadata(value string) EncodeJpegAttr {
	return func(m optionalAttr) {
		m["xmp_metadata"] = value
	}
}

// JPEG-encode an image.
//
// `image` is a 3-D uint8 Tensor of shape `[height, width, channels]`.
//
// The attr `format` can be used to override the color format of the encoded
// output.  Values can be:
//
// *   `''`: Use a default format based on the number of channels in the image.
// *   `grayscale`: Output a grayscale JPEG image.  The `channels` dimension
//     of `image` must be 1.
// *   `rgb`: Output an RGB JPEG image. The `channels` dimension
//     of `image` must be 3.
//
// If `format` is not specified or is the empty string, a default format is picked
// based on the number of channels in `image`:
//
// *   1: Output a grayscale image.
// *   3: Output an RGB image.
//
// Arguments:
//	image: 3-D with shape `[height, width, channels]`.
//
// Returns 0-D. JPEG-encoded image.
func EncodeJpeg(scope *Scope, image tf.Output, optional ...EncodeJpegAttr) (contents tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "EncodeJpeg",
		Input: []tf.Input{
			image,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
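
// Example (editor's note): a hedged sketch of EncodeJpeg with a few of the
// optional attributes above; `image` is assumed to be a 3-D uint8 tensor
// already present in the graph (e.g. from DecodePng).
//
// ```
// contents := op.EncodeJpeg(s, image,
// 	op.EncodeJpegQuality(90),
// 	op.EncodeJpegProgressive(true),
// 	op.EncodeJpegOptimizeSize(true))
// ```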

// DecodeAndCropJpegAttr is an optional argument to DecodeAndCropJpeg.
type DecodeAndCropJpegAttr func(optionalAttr)

// DecodeAndCropJpegChannels sets the optional channels attribute to value.
//
// value: Number of color channels for the decoded image.
// If not specified, defaults to 0
func DecodeAndCropJpegChannels(value int64) DecodeAndCropJpegAttr {
	return func(m optionalAttr) {
		m["channels"] = value
	}
}

// DecodeAndCropJpegRatio sets the optional ratio attribute to value.
//
// value: Downscaling ratio.
// If not specified, defaults to 1
func DecodeAndCropJpegRatio(value int64) DecodeAndCropJpegAttr {
	return func(m optionalAttr) {
		m["ratio"] = value
	}
}

// DecodeAndCropJpegFancyUpscaling sets the optional fancy_upscaling attribute to value.
//
// value: If true use a slower but nicer upscaling of the
// chroma planes (yuv420/422 only).
// If not specified, defaults to true
func DecodeAndCropJpegFancyUpscaling(value bool) DecodeAndCropJpegAttr {
	return func(m optionalAttr) {
		m["fancy_upscaling"] = value
	}
}

// DecodeAndCropJpegTryRecoverTruncated sets the optional try_recover_truncated attribute to value.
//
// value: If true try to recover an image from truncated input.
// If not specified, defaults to false
func DecodeAndCropJpegTryRecoverTruncated(value bool) DecodeAndCropJpegAttr {
	return func(m optionalAttr) {
		m["try_recover_truncated"] = value
	}
}

// DecodeAndCropJpegAcceptableFraction sets the optional acceptable_fraction attribute to value.
//
// value: The minimum required fraction of lines before a truncated
// input is accepted.
// If not specified, defaults to 1
func DecodeAndCropJpegAcceptableFraction(value float32) DecodeAndCropJpegAttr {
	return func(m optionalAttr) {
		m["acceptable_fraction"] = value
	}
}

// DecodeAndCropJpegDctMethod sets the optional dct_method attribute to value.
//
// value: String specifying a hint about the algorithm used for
// decompression.  Defaults to "", which maps to a system-specific
// default.  Currently valid values are ["INTEGER_FAST",
// "INTEGER_ACCURATE"].  The hint may be ignored (e.g., if the internal
// jpeg library is changed to a version that does not have that specific
// option).
// If not specified, defaults to ""
func DecodeAndCropJpegDctMethod(value string) DecodeAndCropJpegAttr {
	return func(m optionalAttr) {
		m["dct_method"] = value
	}
}

// Decode and crop a JPEG-encoded image to a uint8 tensor.
//
// The attr `channels` indicates the desired number of color channels for the
// decoded image.
//
// Accepted values are:
//
// *   0: Use the number of channels in the JPEG-encoded image.
// *   1: output a grayscale image.
// *   3: output an RGB image.
//
// If needed, the JPEG-encoded image is transformed to match the requested number
// of color channels.
//
// The attr `ratio` allows downscaling the image by an integer factor during
// decoding.  Allowed values are: 1, 2, 4, and 8.  This is much faster than
// downscaling the image later.
//
// It is equivalent to a combination of decode and crop, but much faster because
// only part of the JPEG image is decoded.
//
// Arguments:
//	contents: 0-D.  The JPEG-encoded image.
//	crop_window: 1-D.  The crop window: [crop_y, crop_x, crop_height, crop_width].
//
// Returns 3-D with shape `[height, width, channels]`.
func DecodeAndCropJpeg(scope *Scope, contents tf.Output, crop_window tf.Output, optional ...DecodeAndCropJpegAttr) (image tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "DecodeAndCropJpeg",
		Input: []tf.Input{
			contents, crop_window,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
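
// Example (editor's note): a minimal sketch of a decode-and-crop, assuming
// the `Const` helper from this package; the crop window values are
// illustrative.
//
// ```
// crop := op.Const(s, []int32{10, 20, 100, 200}) // [crop_y, crop_x, crop_height, crop_width]
// img := op.DecodeAndCropJpeg(s, contents, crop,
// 	op.DecodeAndCropJpegRatio(2)) // decode at half resolution
// ```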

// RandomCropAttr is an optional argument to RandomCrop.
type RandomCropAttr func(optionalAttr)

// RandomCropSeed sets the optional seed attribute to value.
//
// value: If either seed or seed2 are set to be non-zero, the random number
// generator is seeded by the given seed.  Otherwise, it is seeded by a
// random seed.
// If not specified, defaults to 0
func RandomCropSeed(value int64) RandomCropAttr {
	return func(m optionalAttr) {
		m["seed"] = value
	}
}

// RandomCropSeed2 sets the optional seed2 attribute to value.
//
// value: A second seed to avoid seed collision.
// If not specified, defaults to 0
func RandomCropSeed2(value int64) RandomCropAttr {
	return func(m optionalAttr) {
		m["seed2"] = value
	}
}

// Randomly crop `image`.
//
// DEPRECATED at GraphDef version 8: Random crop is now pure Python
//
// `size` is a 1-D int64 tensor with 2 elements representing the crop height and
// width.  The values must be non-negative.
//
// This Op picks a random location in `image` and crops a `height` by `width`
// rectangle from that location.  The random location is picked so the cropped
// area will fit inside the original image.
//
// Arguments:
//	image: 3-D of shape `[height, width, channels]`.
//	size: 1-D of length 2 containing: `crop_height`, `crop_width`.
//
// Returns 3-D of shape `[crop_height, crop_width, channels]`.
func RandomCrop(scope *Scope, image tf.Output, size tf.Output, optional ...RandomCropAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "RandomCrop",
		Input: []tf.Input{
			image, size,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// ResizeNearestNeighborGradAttr is an optional argument to ResizeNearestNeighborGrad.
type ResizeNearestNeighborGradAttr func(optionalAttr)

// ResizeNearestNeighborGradAlignCorners sets the optional align_corners attribute to value.
//
// value: If true, the centers of the 4 corner pixels of the input and grad tensors are
// aligned. Defaults to false.
// If not specified, defaults to false
func ResizeNearestNeighborGradAlignCorners(value bool) ResizeNearestNeighborGradAttr {
	return func(m optionalAttr) {
		m["align_corners"] = value
	}
}

// ResizeNearestNeighborGradHalfPixelCenters sets the optional half_pixel_centers attribute to value.
// If not specified, defaults to false
func ResizeNearestNeighborGradHalfPixelCenters(value bool) ResizeNearestNeighborGradAttr {
	return func(m optionalAttr) {
		m["half_pixel_centers"] = value
	}
}

// Computes the gradient of nearest neighbor interpolation.
//
// Arguments:
//	grads: 4-D with shape `[batch, height, width, channels]`.
//	size: A 1-D int32 Tensor of 2 elements: `orig_height, orig_width`. The
// original input size.
//
// Returns 4-D with shape `[batch, orig_height, orig_width, channels]`. Gradients
// with respect to the input image.
func ResizeNearestNeighborGrad(scope *Scope, grads tf.Output, size tf.Output, optional ...ResizeNearestNeighborGradAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ResizeNearestNeighborGrad",
		Input: []tf.Input{
			grads, size,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Runs multiple additive regression ensemble predictors on input instances and
//
// computes the logits. It is designed to be used during prediction.
// It traverses all the trees and calculates the final score for each instance.
//
// Arguments:
//
//	bucketized_features: A list of rank 1 Tensors containing the bucket id for each
// feature.
//	logits_dimension: scalar, dimension of the logits, to be used for partial logits
// shape.
//
// Returns Output rank 2 Tensor containing logits for each example.
func BoostedTreesPredict(scope *Scope, tree_ensemble_handle tf.Output, bucketized_features []tf.Output, logits_dimension int64) (logits tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"logits_dimension": logits_dimension}
	opspec := tf.OpSpec{
		Type: "BoostedTreesPredict",
		Input: []tf.Input{
			tree_ensemble_handle, tf.OutputList(bucketized_features),
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// ResizeBilinearAttr is an optional argument to ResizeBilinear.
type ResizeBilinearAttr func(optionalAttr)

// ResizeBilinearAlignCorners sets the optional align_corners attribute to value.
//
// value: If true, the centers of the 4 corner pixels of the input and output tensors are
// aligned, preserving the values at the corner pixels. Defaults to false.
// If not specified, defaults to false
func ResizeBilinearAlignCorners(value bool) ResizeBilinearAttr {
	return func(m optionalAttr) {
		m["align_corners"] = value
	}
}

// ResizeBilinearHalfPixelCenters sets the optional half_pixel_centers attribute to value.
// If not specified, defaults to false
func ResizeBilinearHalfPixelCenters(value bool) ResizeBilinearAttr {
	return func(m optionalAttr) {
		m["half_pixel_centers"] = value
	}
}

// Resize `images` to `size` using bilinear interpolation.
//
// Input images can be of different types but output images are always float.
//
// Arguments:
//	images: 4-D with shape `[batch, height, width, channels]`.
//	size: A 1-D int32 Tensor of 2 elements: `new_height, new_width`.  The
// new size for the images.
//
// Returns 4-D with shape
// `[batch, new_height, new_width, channels]`.
func ResizeBilinear(scope *Scope, images tf.Output, size tf.Output, optional ...ResizeBilinearAttr) (resized_images tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ResizeBilinear",
		Input: []tf.Input{
			images, size,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
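
// Example (editor's note): a small sketch of ResizeBilinear, assuming the
// `Const` helper from this package; `images` is assumed to be a 4-D batch.
//
// ```
// size := op.Const(s, []int32{224, 224}) // new_height, new_width
// resized := op.ResizeBilinear(s, images, size,
// 	op.ResizeBilinearHalfPixelCenters(true))
// ```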

// Returns the set of files matching one or more glob patterns.
//
// Note that this routine only supports wildcard characters in the
// basename portion of the pattern, not in the directory portion.
// Note also that the order of filenames returned is deterministic.
//
// Arguments:
//	pattern: Shell wildcard pattern(s). Scalar or vector of type string.
//
// Returns A vector of matching filenames.
func MatchingFiles(scope *Scope, pattern tf.Output) (filenames tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "MatchingFiles",
		Input: []tf.Input{
			pattern,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
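
// Example (editor's note): a minimal sketch of MatchingFiles, assuming the
// `Const` helper from this package; the glob pattern is illustrative and may
// only contain wildcards in the basename portion.
//
// ```
// files := op.MatchingFiles(s, op.Const(s, "/data/images/*.png"))
// ```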

// Restore a Reader to its initial clean state.
//
// Arguments:
//	reader_handle: Handle to a Reader.
//
// Returns the created operation.
func ReaderResetV2(scope *Scope, reader_handle tf.Output) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "ReaderResetV2",
		Input: []tf.Input{
			reader_handle,
		},
	}
	return scope.AddOperation(opspec)
}

// Restore a reader to a previously saved state.
//
// Not all Readers support being restored, so this can produce an
// Unimplemented error.
//
// Arguments:
//	reader_handle: Handle to a Reader.
//	state: Result of a ReaderSerializeState of a Reader with type
// matching reader_handle.
//
// Returns the created operation.
func ReaderRestoreStateV2(scope *Scope, reader_handle tf.Output, state tf.Output) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "ReaderRestoreStateV2",
		Input: []tf.Input{
			reader_handle, state,
		},
	}
	return scope.AddOperation(opspec)
}

// ResizeAreaAttr is an optional argument to ResizeArea.
type ResizeAreaAttr func(optionalAttr)

// ResizeAreaAlignCorners sets the optional align_corners attribute to value.
//
// value: If true, the centers of the 4 corner pixels of the input and output tensors are
// aligned, preserving the values at the corner pixels. Defaults to false.
// If not specified, defaults to false
func ResizeAreaAlignCorners(value bool) ResizeAreaAttr {
	return func(m optionalAttr) {
		m["align_corners"] = value
	}
}

// Resize `images` to `size` using area interpolation.
//
// Input images can be of different types but output images are always float.
//
// The range of pixel values for the output image might be slightly different
// from the range for the input image because of limited numerical precision.
// To guarantee an output range, for example `[0.0, 1.0]`, apply
// `tf.clip_by_value` to the output.
//
// Each output pixel is computed by first transforming the pixel's footprint into
// the input tensor and then averaging the pixels that intersect the footprint. An
// input pixel's contribution to the average is weighted by the fraction of its
// area that intersects the footprint.  This is the same as OpenCV's INTER_AREA.
//
// Arguments:
//	images: 4-D with shape `[batch, height, width, channels]`.
//	size: A 1-D int32 Tensor of 2 elements: `new_height, new_width`.  The
// new size for the images.
//
// Returns 4-D with shape
// `[batch, new_height, new_width, channels]`.
func ResizeArea(scope *Scope, images tf.Output, size tf.Output, optional ...ResizeAreaAttr) (resized_images tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ResizeArea",
		Input: []tf.Input{
			images, size,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Returns the number of work units this Reader has finished processing.
//
// Arguments:
//	reader_handle: Handle to a Reader.
func ReaderNumWorkUnitsCompletedV2(scope *Scope, reader_handle tf.Output) (units_completed tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "ReaderNumWorkUnitsCompletedV2",
		Input: []tf.Input{
			reader_handle,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Returns up to `num_records` (key, value) pairs produced by a Reader.
//
// Will dequeue from the input queue if necessary (e.g. when the
// Reader needs to start reading from a new file since it has finished
// with the previous file).
// It may return fewer than `num_records` even before the last batch.
//
// Arguments:
//	reader_handle: Handle to a `Reader`.
//	queue_handle: Handle to a `Queue`, with string work items.
//	num_records: number of records to read from `Reader`.
//
// Returns:
//	keys: A 1-D tensor.
//	values: A 1-D tensor.
func ReaderReadUpToV2(scope *Scope, reader_handle tf.Output, queue_handle tf.Output, num_records tf.Output) (keys tf.Output, values tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "ReaderReadUpToV2",
		Input: []tf.Input{
			reader_handle, queue_handle, num_records,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1)
}

// QueueDequeueV2Attr is an optional argument to QueueDequeueV2.
type QueueDequeueV2Attr func(optionalAttr)

// QueueDequeueV2TimeoutMs sets the optional timeout_ms attribute to value.
//
// value: If the queue is empty, this operation will block for up to
// timeout_ms milliseconds.
// Note: This option is not supported yet.
// If not specified, defaults to -1
func QueueDequeueV2TimeoutMs(value int64) QueueDequeueV2Attr {
	return func(m optionalAttr) {
		m["timeout_ms"] = value
	}
}

// Dequeues a tuple of one or more tensors from the given queue.
//
// This operation has k outputs, where k is the number of components
// in the tuples stored in the given queue, and output i is the ith
// component of the dequeued tuple.
//
// N.B. If the queue is empty, this operation will block until an element
// has been dequeued (or 'timeout_ms' elapses, if specified).
//
// Arguments:
//	handle: The handle to a queue.
//	component_types: The type of each component in a tuple.
//
// Returns One or more tensors that were dequeued as a tuple.
func QueueDequeueV2(scope *Scope, handle tf.Output, component_types []tf.DataType, optional ...QueueDequeueV2Attr) (components []tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"component_types": component_types}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "QueueDequeueV2",
		Input: []tf.Input{
			handle,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	if scope.Err() != nil {
		return
	}
	var idx int
	var err error
	if components, idx, err = makeOutputList(op, idx, "components"); err != nil {
		scope.UpdateErr("QueueDequeueV2", err)
		return
	}
	return components
}

// Returns the next record (key, value pair) produced by a Reader.
//
// Will dequeue from the input queue if necessary (e.g. when the
// Reader needs to start reading from a new file since it has finished
// with the previous file).
//
// Arguments:
//	reader_handle: Handle to a Reader.
//	queue_handle: Handle to a Queue, with string work items.
//
// Returns:
//	key: A scalar.
//	value: A scalar.
func ReaderReadV2(scope *Scope, reader_handle tf.Output, queue_handle tf.Output) (key tf.Output, value tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "ReaderReadV2",
		Input: []tf.Input{
			reader_handle, queue_handle,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1)
}

// Return a slice from 'input'.
//
// The output tensor is a tensor with dimensions described by 'size'
// whose values are extracted from 'input' starting at the offsets in
// 'begin'.
//
// *Requirements*:
//   0 <= begin[i] <= begin[i] + size[i] <= Di  for i in [0, n)
//
// Arguments:
//
//	begin: begin[i] specifies the offset into the 'i'th dimension of
// 'input' to slice from.
//	size: size[i] specifies the number of elements of the 'i'th dimension
// of 'input' to slice. If size[i] is -1, all remaining elements in dimension
// i are included in the slice (i.e. this is equivalent to setting
// size[i] = input.dim_size(i) - begin[i]).
func Slice(scope *Scope, input tf.Output, begin tf.Output, size tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Slice",
		Input: []tf.Input{
			input, begin, size,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
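
// Example (editor's note): a small sketch of Slice, assuming the `Const`
// helper from this package. A size of -1 keeps all remaining elements of
// that dimension.
//
// ```
// t := op.Const(s, []int32{0, 1, 2, 3, 4, 5})
// out := op.Slice(s, t, op.Const(s, []int32{2}), op.Const(s, []int32{-1}))
// // out evaluates to [2 3 4 5]
// ```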

// TFRecordReaderV2Attr is an optional argument to TFRecordReaderV2.
type TFRecordReaderV2Attr func(optionalAttr)

// TFRecordReaderV2Container sets the optional container attribute to value.
//
// value: If non-empty, this reader is placed in the given container.
// Otherwise, a default container is used.
// If not specified, defaults to ""
func TFRecordReaderV2Container(value string) TFRecordReaderV2Attr {
	return func(m optionalAttr) {
		m["container"] = value
	}
}

// TFRecordReaderV2SharedName sets the optional shared_name attribute to value.
//
// value: If non-empty, this reader is named in the given bucket
// with this shared_name. Otherwise, the node name is used instead.
// If not specified, defaults to ""
func TFRecordReaderV2SharedName(value string) TFRecordReaderV2Attr {
	return func(m optionalAttr) {
		m["shared_name"] = value
	}
}

// TFRecordReaderV2CompressionType sets the optional compression_type attribute to value.
// If not specified, defaults to ""
func TFRecordReaderV2CompressionType(value string) TFRecordReaderV2Attr {
	return func(m optionalAttr) {
		m["compression_type"] = value
	}
}

// A Reader that outputs the records from a TensorFlow Records file.
//
// Returns The handle to reference the Reader.
func TFRecordReaderV2(scope *Scope, optional ...TFRecordReaderV2Attr) (reader_handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "TFRecordReaderV2",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// ParseExampleDatasetAttr is an optional argument to ParseExampleDataset.
type ParseExampleDatasetAttr func(optionalAttr)

// ParseExampleDatasetSloppy sets the optional sloppy attribute to value.
// If not specified, defaults to false
func ParseExampleDatasetSloppy(value bool) ParseExampleDatasetAttr {
	return func(m optionalAttr) {
		m["sloppy"] = value
	}
}

// ParseExampleDatasetRaggedKeys sets the optional ragged_keys attribute to value.
// If not specified, defaults to an empty list.
//
// REQUIRES: len(value) >= 0
func ParseExampleDatasetRaggedKeys(value []string) ParseExampleDatasetAttr {
	return func(m optionalAttr) {
		m["ragged_keys"] = value
	}
}

// ParseExampleDatasetRaggedValueTypes sets the optional ragged_value_types attribute to value.
// If not specified, defaults to an empty list.
//
// REQUIRES: len(value) >= 0
func ParseExampleDatasetRaggedValueTypes(value []tf.DataType) ParseExampleDatasetAttr {
	return func(m optionalAttr) {
		m["ragged_value_types"] = value
	}
}

// ParseExampleDatasetRaggedSplitTypes sets the optional ragged_split_types attribute to value.
// If not specified, defaults to an empty list.
//
// REQUIRES: len(value) >= 0
func ParseExampleDatasetRaggedSplitTypes(value []tf.DataType) ParseExampleDatasetAttr {
	return func(m optionalAttr) {
		m["ragged_split_types"] = value
	}
}

// Transforms `input_dataset` containing `Example` protos as vectors of DT_STRING into a dataset of `Tensor` or `SparseTensor` objects representing the parsed features.
//
// Arguments:
//
//
//	dense_defaults: A dict mapping string keys to `Tensor`s.
// The keys of the dict must match the dense_keys of the feature.
//	sparse_keys: A list of string keys in the examples features.
// The results for these keys will be returned as `SparseTensor` objects.
//	dense_keys: A list of Ndense string Tensors (scalars).
// The keys expected in the Examples features associated with dense values.
//	sparse_types: A list of `DTypes` of the same length as `sparse_keys`.
// Only `tf.float32` (`FloatList`), `tf.int64` (`Int64List`),
// and `tf.string` (`BytesList`) are supported.
//	dense_shapes: List of tuples with the same length as `dense_keys`.
// The shape of the data for each dense feature referenced by `dense_keys`.
// Required for any input tensors identified by `dense_keys`.  Must be
// either fully defined, or may contain an unknown first dimension.
// An unknown first dimension means the feature is treated as having
// a variable number of blocks, and the output shape along this dimension
// is considered unknown at graph build time.  Padding is applied for
// minibatch elements smaller than the maximum number of blocks for the
// given feature along this dimension.
//	output_types: The type list for the return values.
//	output_shapes: The list of shapes being produced.
func ParseExampleDataset(scope *Scope, input_dataset tf.Output, num_parallel_calls tf.Output, dense_defaults []tf.Output, sparse_keys []string, dense_keys []string, sparse_types []tf.DataType, dense_shapes []tf.Shape, output_types []tf.DataType, output_shapes []tf.Shape, optional ...ParseExampleDatasetAttr) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"sparse_keys": sparse_keys, "dense_keys": dense_keys, "sparse_types": sparse_types, "dense_shapes": dense_shapes, "output_types": output_types, "output_shapes": output_shapes}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ParseExampleDataset",
		Input: []tf.Input{
			input_dataset, num_parallel_calls, tf.OutputList(dense_defaults),
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// IdentityReaderV2Attr is an optional argument to IdentityReaderV2.
type IdentityReaderV2Attr func(optionalAttr)

// IdentityReaderV2Container sets the optional container attribute to value.
//
// value: If non-empty, this reader is placed in the given container.
// Otherwise, a default container is used.
// If not specified, defaults to ""
func IdentityReaderV2Container(value string) IdentityReaderV2Attr {
	return func(m optionalAttr) {
		m["container"] = value
	}
}

// IdentityReaderV2SharedName sets the optional shared_name attribute to value.
//
// value: If non-empty, this reader is named in the given bucket
// with this shared_name. Otherwise, the node name is used instead.
// If not specified, defaults to ""
func IdentityReaderV2SharedName(value string) IdentityReaderV2Attr {
	return func(m optionalAttr) {
		m["shared_name"] = value
	}
}

// A Reader that outputs the queued work as both the key and value.
//
// To use, enqueue strings in a Queue.  ReaderRead will take the front
// work string and output (work, work).
//
// Returns The handle to reference the Reader.
func IdentityReaderV2(scope *Scope, optional ...IdentityReaderV2Attr) (reader_handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "IdentityReaderV2",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// FixedLengthRecordReaderV2Attr is an optional argument to FixedLengthRecordReaderV2.
type FixedLengthRecordReaderV2Attr func(optionalAttr)

// FixedLengthRecordReaderV2HeaderBytes sets the optional header_bytes attribute to value.
//
// value: Number of bytes in the header, defaults to 0.
// If not specified, defaults to 0
func FixedLengthRecordReaderV2HeaderBytes(value int64) FixedLengthRecordReaderV2Attr {
	return func(m optionalAttr) {
		m["header_bytes"] = value
	}
}

// FixedLengthRecordReaderV2FooterBytes sets the optional footer_bytes attribute to value.
//
// value: Number of bytes in the footer, defaults to 0.
// If not specified, defaults to 0
func FixedLengthRecordReaderV2FooterBytes(value int64) FixedLengthRecordReaderV2Attr {
	return func(m optionalAttr) {
		m["footer_bytes"] = value
	}
}

// FixedLengthRecordReaderV2HopBytes sets the optional hop_bytes attribute to value.
//
// value: Number of bytes to hop before each read. Default of 0 means using
// record_bytes.
// If not specified, defaults to 0
func FixedLengthRecordReaderV2HopBytes(value int64) FixedLengthRecordReaderV2Attr {
	return func(m optionalAttr) {
		m["hop_bytes"] = value
	}
}

// FixedLengthRecordReaderV2Container sets the optional container attribute to value.
//
// value: If non-empty, this reader is placed in the given container.
// Otherwise, a default container is used.
// If not specified, defaults to ""
func FixedLengthRecordReaderV2Container(value string) FixedLengthRecordReaderV2Attr {
	return func(m optionalAttr) {
		m["container"] = value
	}
}

// FixedLengthRecordReaderV2SharedName sets the optional shared_name attribute to value.
//
// value: If non-empty, this reader is named in the given bucket
// with this shared_name. Otherwise, the node name is used instead.
// If not specified, defaults to ""
func FixedLengthRecordReaderV2SharedName(value string) FixedLengthRecordReaderV2Attr {
	return func(m optionalAttr) {
		m["shared_name"] = value
	}
}

// FixedLengthRecordReaderV2Encoding sets the optional encoding attribute to value.
//
// value: The type of encoding for the file. Currently ZLIB and GZIP
// are supported. Defaults to none.
// If not specified, defaults to ""
func FixedLengthRecordReaderV2Encoding(value string) FixedLengthRecordReaderV2Attr {
	return func(m optionalAttr) {
		m["encoding"] = value
	}
}

// A Reader that outputs fixed-length records from a file.
//
// Arguments:
//	record_bytes: Number of bytes in the record.
//
// Returns The handle to reference the Reader.
func FixedLengthRecordReaderV2(scope *Scope, record_bytes int64, optional ...FixedLengthRecordReaderV2Attr) (reader_handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"record_bytes": record_bytes}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "FixedLengthRecordReaderV2",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// ExperimentalRebatchDatasetAttr is an optional argument to ExperimentalRebatchDataset.
type ExperimentalRebatchDatasetAttr func(optionalAttr)

// ExperimentalRebatchDatasetUseFallback sets the optional use_fallback attribute to value.
// If not specified, defaults to true
func ExperimentalRebatchDatasetUseFallback(value bool) ExperimentalRebatchDatasetAttr {
	return func(m optionalAttr) {
		m["use_fallback"] = value
	}
}

// Creates a dataset that changes the batch size.
//
// Creates a dataset that changes the batch size of the dataset to
// `current batch size // num_replicas` (floor division).
//
// Arguments:
//	input_dataset: A variant tensor representing the input dataset.
//	num_replicas: A scalar representing the number of replicas to distribute this batch across. As
// a result of this transformation, the current batch size is divided by this
// parameter.
//
//
func ExperimentalRebatchDataset(scope *Scope, input_dataset tf.Output, num_replicas tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...ExperimentalRebatchDatasetAttr) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ExperimentalRebatchDataset",
		Input: []tf.Input{
			input_dataset, num_replicas,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// TextLineReaderV2Attr is an optional argument to TextLineReaderV2.
type TextLineReaderV2Attr func(optionalAttr)

// TextLineReaderV2SkipHeaderLines sets the optional skip_header_lines attribute to value.
//
// value: Number of lines to skip from the beginning of every file.
// If not specified, defaults to 0
func TextLineReaderV2SkipHeaderLines(value int64) TextLineReaderV2Attr {
	return func(m optionalAttr) {
		m["skip_header_lines"] = value
	}
}

// TextLineReaderV2Container sets the optional container attribute to value.
//
// value: If non-empty, this reader is placed in the given container.
// Otherwise, a default container is used.
// If not specified, defaults to ""
func TextLineReaderV2Container(value string) TextLineReaderV2Attr {
	return func(m optionalAttr) {
		m["container"] = value
	}
}

// TextLineReaderV2SharedName sets the optional shared_name attribute to value.
//
// value: If non-empty, this reader is named in the given bucket
// with this shared_name. Otherwise, the node name is used instead.
// If not specified, defaults to ""
func TextLineReaderV2SharedName(value string) TextLineReaderV2Attr {
	return func(m optionalAttr) {
		m["shared_name"] = value
	}
}

// A Reader that outputs the lines of a file delimited by '\n'.
//
// Returns The handle to reference the Reader.
func TextLineReaderV2(scope *Scope, optional ...TextLineReaderV2Attr) (reader_handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "TextLineReaderV2",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// BoostedTreesQuantileStreamResourceFlushAttr is an optional argument to BoostedTreesQuantileStreamResourceFlush.
type BoostedTreesQuantileStreamResourceFlushAttr func(optionalAttr)

// BoostedTreesQuantileStreamResourceFlushGenerateQuantiles sets the optional generate_quantiles attribute to value.
//
// value: bool; If True, the output will be the num_quantiles for each stream where the ith
// entry is the ith quantile of the input with an approximation error of epsilon.
// Duplicate values may be present.
// If False, the output will be the points in the histogram that we got, which
// roughly translates to 1/epsilon boundaries, without any duplicates.
// Defaults to False.
// If not specified, defaults to false
func BoostedTreesQuantileStreamResourceFlushGenerateQuantiles(value bool) BoostedTreesQuantileStreamResourceFlushAttr {
	return func(m optionalAttr) {
		m["generate_quantiles"] = value
	}
}

// Flush the summaries for a quantile stream resource.
//
// An op that flushes the summaries for a quantile stream resource.
//
// Arguments:
//	quantile_stream_resource_handle: resource handle referring to a QuantileStreamResource.
//	num_buckets: int; approximate number of buckets unless using generate_quantiles.
//
// Returns the created operation.
func BoostedTreesQuantileStreamResourceFlush(scope *Scope, quantile_stream_resource_handle tf.Output, num_buckets tf.Output, optional ...BoostedTreesQuantileStreamResourceFlushAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "BoostedTreesQuantileStreamResourceFlush",
		Input: []tf.Input{
			quantile_stream_resource_handle, num_buckets,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}

// WholeFileReaderV2Attr is an optional argument to WholeFileReaderV2.
type WholeFileReaderV2Attr func(optionalAttr)

// WholeFileReaderV2Container sets the optional container attribute to value.
//
// value: If non-empty, this reader is placed in the given container.
// Otherwise, a default container is used.
// If not specified, defaults to ""
func WholeFileReaderV2Container(value string) WholeFileReaderV2Attr {
	return func(m optionalAttr) {
		m["container"] = value
	}
}

// WholeFileReaderV2SharedName sets the optional shared_name attribute to value.
//
// value: If non-empty, this reader is named in the given bucket
// with this shared_name. Otherwise, the node name is used instead.
// If not specified, defaults to ""
func WholeFileReaderV2SharedName(value string) WholeFileReaderV2Attr {
	return func(m optionalAttr) {
		m["shared_name"] = value
	}
}

// A Reader that outputs the entire contents of a file as a value.
//
// To use, enqueue filenames in a Queue.  The output of ReaderRead will
// be a filename (key) and the contents of that file (value).
//
// Returns The handle to reference the Reader.
func WholeFileReaderV2(scope *Scope, optional ...WholeFileReaderV2Attr) (reader_handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "WholeFileReaderV2",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
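
// Example (editor's note): a hedged sketch of a whole-file read pipeline. It
// assumes the generated `FIFOQueueV2` and `QueueEnqueueManyV2` wrappers
// defined elsewhere in this package; `filenames` is a 1-D string tensor.
//
// ```
// s := op.NewScope()
// queue := op.FIFOQueueV2(s, []tf.DataType{tf.String})
// enqueue := op.QueueEnqueueManyV2(s, queue, []tf.Output{filenames})
// reader := op.WholeFileReaderV2(s)
// key, value := op.ReaderReadV2(s, reader, queue) // filename, file contents
// ```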

// Generate a glob pattern matching all sharded file names.
func ShardedFilespec(scope *Scope, basename tf.Output, num_shards tf.Output) (filename tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "ShardedFilespec",
		Input: []tf.Input{
			basename, num_shards,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Generate a sharded filename. The filename is printf formatted as
//
//    %s-%05d-of-%05d, basename, shard, num_shards.
func ShardedFilename(scope *Scope, basename tf.Output, shard tf.Output, num_shards tf.Output) (filename tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "ShardedFilename",
		Input: []tf.Input{
			basename, shard, num_shards,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
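
// Example (editor's note): a small sketch of ShardedFilename, assuming the
// `Const` helper from this package. Fetching `name` yields
// "ckpt-00003-of-00008".
//
// ```
// name := op.ShardedFilename(s,
// 	op.Const(s, "ckpt"), op.Const(s, int32(3)), op.Const(s, int32(8)))
// ```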
14442
14443// RestoreSliceAttr is an optional argument to RestoreSlice.
14444type RestoreSliceAttr func(optionalAttr)
14445
14446// RestoreSlicePreferredShard sets the optional preferred_shard attribute to value.
14447//
14448// value: Index of file to open first if multiple files match
14449// `file_pattern`. See the documentation for `Restore`.
14450// If not specified, defaults to -1
14451func RestoreSlicePreferredShard(value int64) RestoreSliceAttr {
14452	return func(m optionalAttr) {
14453		m["preferred_shard"] = value
14454	}
14455}
14456
14457// Restores a tensor from checkpoint files.
14458//
14459// This is like `Restore` except that restored tensor can be listed as filling
14460// only a slice of a larger tensor.  `shape_and_slice` specifies the shape of the
14461// larger tensor and the slice that the restored tensor covers.
14462//
14463// The `shape_and_slice` input has the same format as the
14464// elements of the `shapes_and_slices` input of the `SaveSlices` op.
14465//
14466// Arguments:
14467//	file_pattern: Must have a single element. The pattern of the files from
14468// which we read the tensor.
14469//	tensor_name: Must have a single element. The name of the tensor to be
14470// restored.
14471//	shape_and_slice: Scalar. The shapes and slice specifications to use when
14472// restoring a tensors.
14473//	dt: The type of the tensor to be restored.
14474//
14475// Returns The restored tensor.
14476func RestoreSlice(scope *Scope, file_pattern tf.Output, tensor_name tf.Output, shape_and_slice tf.Output, dt tf.DataType, optional ...RestoreSliceAttr) (tensor tf.Output) {
14477	if scope.Err() != nil {
14478		return
14479	}
14480	attrs := map[string]interface{}{"dt": dt}
14481	for _, a := range optional {
14482		a(attrs)
14483	}
14484	opspec := tf.OpSpec{
14485		Type: "RestoreSlice",
14486		Input: []tf.Input{
14487			file_pattern, tensor_name, shape_and_slice,
14488		},
14489		Attrs: attrs,
14490	}
14491	op := scope.AddOperation(opspec)
14492	return op.Output(0)
14493}
14494
14495// RestoreAttr is an optional argument to Restore.
14496type RestoreAttr func(optionalAttr)
14497
14498// RestorePreferredShard sets the optional preferred_shard attribute to value.
14499//
14500// value: Index of file to open first if multiple files match
14501// `file_pattern`.
14502// If not specified, defaults to -1
14503func RestorePreferredShard(value int64) RestoreAttr {
14504	return func(m optionalAttr) {
14505		m["preferred_shard"] = value
14506	}
14507}
14508
14509// Restores a tensor from checkpoint files.
14510//
14511// Reads a tensor stored in one or several files. If there are several files (for
14512// instance because a tensor was saved as slices), `file_pattern` may contain
14513// wildcard symbols (`*` and `?`) in the filename portion only, not in the
14514// directory portion.
14515//
14516// If a `file_pattern` matches several files, `preferred_shard` can be used to hint
14517// in which file the requested tensor is likely to be found. This op will first
14518// open the file at index `preferred_shard` in the list of matching files and try
14519// to restore tensors from that file.  Only if some tensors or tensor slices are
14520// not found in that first file, then the Op opens all the files. Setting
14521// `preferred_shard` to match the value passed as the `shard` input
14522// of a matching `Save` Op may speed up Restore.  This attribute only affects
14523// performance, not correctness.  The default value -1 means files are processed in
14524// order.
14525//
14526// See also `RestoreSlice`.
14527//
14528// Arguments:
14529//	file_pattern: Must have a single element. The pattern of the files from
14530// which we read the tensor.
14531//	tensor_name: Must have a single element. The name of the tensor to be
14532// restored.
14533//	dt: The type of the tensor to be restored.
14534//
14535// Returns The restored tensor.
14536func Restore(scope *Scope, file_pattern tf.Output, tensor_name tf.Output, dt tf.DataType, optional ...RestoreAttr) (tensor tf.Output) {
14537	if scope.Err() != nil {
14538		return
14539	}
14540	attrs := map[string]interface{}{"dt": dt}
14541	for _, a := range optional {
14542		a(attrs)
14543	}
14544	opspec := tf.OpSpec{
14545		Type: "Restore",
14546		Input: []tf.Input{
14547			file_pattern, tensor_name,
14548		},
14549		Attrs: attrs,
14550	}
14551	op := scope.AddOperation(opspec)
14552	return op.Output(0)
14553}
14554
14555// Saves input tensors slices to disk.
14556//
14557// This is like `Save` except that tensors can be listed in the saved file as being
14558// a slice of a larger tensor.  `shapes_and_slices` specifies the shape of the
14559// larger tensor and the slice that this tensor covers. `shapes_and_slices` must
14560// have as many elements as `tensor_names`.
14561//
14562// Elements of the `shapes_and_slices` input must either be:
14563//
14564// *  The empty string, in which case the corresponding tensor is
14565//    saved normally.
14566// *  A string of the form `dim0 dim1 ... dimN-1 slice-spec` where the
14567//    `dimI` are the dimensions of the larger tensor and `slice-spec`
14568//    specifies what part is covered by the tensor to save.
14569//
14570// `slice-spec` itself is a `:`-separated list: `slice0:slice1:...:sliceN-1`
14571// where each `sliceI` is either:
14572//
14573// *  The string `-` meaning that the slice covers all indices of this dimension
14574// *  `start,length` where `start` and `length` are integers.  In that
14575//    case the slice covers `length` indices starting at `start`.
14576//
14577// See also `Save`.
14578//
14579// Arguments:
14580//	filename: Must have a single element. The name of the file to which we write the
14581// tensor.
14582//	tensor_names: Shape `[N]`. The names of the tensors to be saved.
14583//	shapes_and_slices: Shape `[N]`.  The shapes and slice specifications to use when
14584// saving the tensors.
14585//	data: `N` tensors to save.
14586//
14587// Returns the created operation.
14588func SaveSlices(scope *Scope, filename tf.Output, tensor_names tf.Output, shapes_and_slices tf.Output, data []tf.Output) (o *tf.Operation) {
14589	if scope.Err() != nil {
14590		return
14591	}
14592	opspec := tf.OpSpec{
14593		Type: "SaveSlices",
14594		Input: []tf.Input{
14595			filename, tensor_names, shapes_and_slices, tf.OutputList(data),
14596		},
14597	}
14598	return scope.AddOperation(opspec)
14599}
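
// Illustrative sketch (hypothetical path and values): saving one tensor as a
// slice of a larger `[2, 4]` tensor, covering all of dimension 0 and columns
// [0, 2) of dimension 1. The spec string encodes the larger shape followed by
// the per-dimension slice.
//
// ```go
// s := op.NewScope()
// filename := op.Const(s, "/tmp/slices.ckpt") // hypothetical path
// names := op.Const(s, []string{"big_tensor"})
// specs := op.Const(s, []string{"2 4 -:0,2"})      // all of dim 0, [0, 2) of dim 1
// part := op.Const(s, [][]float32{{1, 2}, {5, 6}}) // shape [2, 2]
// _ = op.SaveSlices(s, filename, names, specs, []tf.Output{part})
// ```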
14600
14601// Saves the input tensors to disk.
14602//
14603// The size of `tensor_names` must match the number of tensors in `data`. `data[i]`
14604// is written to `filename` with name `tensor_names[i]`.
14605//
14606// See also `SaveSlices`.
14607//
14608// Arguments:
14609//	filename: Must have a single element. The name of the file to which we write
14610// the tensor.
14611//	tensor_names: Shape `[N]`. The names of the tensors to be saved.
14612//	data: `N` tensors to save.
14613//
14614// Returns the created operation.
14615func Save(scope *Scope, filename tf.Output, tensor_names tf.Output, data []tf.Output) (o *tf.Operation) {
14616	if scope.Err() != nil {
14617		return
14618	}
14619	opspec := tf.OpSpec{
14620		Type: "Save",
14621		Input: []tf.Input{
14622			filename, tensor_names, tf.OutputList(data),
14623		},
14624	}
14625	return scope.AddOperation(opspec)
14626}
14627
14628// MergeV2CheckpointsAttr is an optional argument to MergeV2Checkpoints.
14629type MergeV2CheckpointsAttr func(optionalAttr)
14630
14631// MergeV2CheckpointsDeleteOldDirs sets the optional delete_old_dirs attribute to value.
14632//
// value: if true, recursively delete the dirname of each path in the input
// checkpoint_prefixes after merging (see MergeV2Checkpoints below).
14634// If not specified, defaults to true
14635func MergeV2CheckpointsDeleteOldDirs(value bool) MergeV2CheckpointsAttr {
14636	return func(m optionalAttr) {
14637		m["delete_old_dirs"] = value
14638	}
14639}
14640
14641// V2 format specific: merges the metadata files of sharded checkpoints.  The
14642//
14643// result is one logical checkpoint, with one physical metadata file and renamed
14644// data files.
14645//
14646// Intended for "grouping" multiple checkpoints in a sharded checkpoint setup.
14647//
// If delete_old_dirs is true, the op attempts to recursively delete the dirname of
// each path in the input checkpoint_prefixes.  This is useful when those paths are
// non-user-facing temporary locations.
14651//
14652// Arguments:
14653//	checkpoint_prefixes: prefixes of V2 checkpoints to merge.
14654//	destination_prefix: scalar.  The desired final prefix.  Allowed to be the same
14655// as one of the checkpoint_prefixes.
14656//
14657// Returns the created operation.
14658func MergeV2Checkpoints(scope *Scope, checkpoint_prefixes tf.Output, destination_prefix tf.Output, optional ...MergeV2CheckpointsAttr) (o *tf.Operation) {
14659	if scope.Err() != nil {
14660		return
14661	}
14662	attrs := map[string]interface{}{}
14663	for _, a := range optional {
14664		a(attrs)
14665	}
14666	opspec := tf.OpSpec{
14667		Type: "MergeV2Checkpoints",
14668		Input: []tf.Input{
14669			checkpoint_prefixes, destination_prefix,
14670		},
14671		Attrs: attrs,
14672	}
14673	return scope.AddOperation(opspec)
14674}
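
// Illustrative sketch (hypothetical prefixes): merging two temporary shard
// prefixes into one final checkpoint and deleting the temporary directories.
//
// ```go
// s := op.NewScope()
// prefixes := op.Const(s, []string{"/tmp/part-0/ckpt", "/tmp/part-1/ckpt"})
// dest := op.Const(s, "/tmp/final/ckpt")
// _ = op.MergeV2Checkpoints(s, prefixes, dest,
// 	op.MergeV2CheckpointsDeleteOldDirs(true))
// ```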
14675
14676// DatasetToGraphV2Attr is an optional argument to DatasetToGraphV2.
14677type DatasetToGraphV2Attr func(optionalAttr)
14678
14679// DatasetToGraphV2ExternalStatePolicy sets the optional external_state_policy attribute to value.
14680// If not specified, defaults to 0
14681func DatasetToGraphV2ExternalStatePolicy(value int64) DatasetToGraphV2Attr {
14682	return func(m optionalAttr) {
14683		m["external_state_policy"] = value
14684	}
14685}
14686
14687// DatasetToGraphV2StripDeviceAssignment sets the optional strip_device_assignment attribute to value.
14688// If not specified, defaults to false
14689func DatasetToGraphV2StripDeviceAssignment(value bool) DatasetToGraphV2Attr {
14690	return func(m optionalAttr) {
14691		m["strip_device_assignment"] = value
14692	}
14693}
14694
// Returns a serialized GraphDef representing `input_dataset`.
14698//
14699// Arguments:
14700//	input_dataset: A variant tensor representing the dataset to return the graph representation for.
14701//
14702// Returns The graph representation of the dataset (as serialized GraphDef).
14703func DatasetToGraphV2(scope *Scope, input_dataset tf.Output, optional ...DatasetToGraphV2Attr) (graph tf.Output) {
14704	if scope.Err() != nil {
14705		return
14706	}
14707	attrs := map[string]interface{}{}
14708	for _, a := range optional {
14709		a(attrs)
14710	}
14711	opspec := tf.OpSpec{
14712		Type: "DatasetToGraphV2",
14713		Input: []tf.Input{
14714			input_dataset,
14715		},
14716		Attrs: attrs,
14717	}
14718	op := scope.AddOperation(opspec)
14719	return op.Output(0)
14720}
14721
14722// Restores tensors from a V2 checkpoint.
14723//
14724// For backward compatibility with the V1 format, this Op currently allows
14725// restoring from a V1 checkpoint as well:
14726//   - This Op first attempts to find the V2 index file pointed to by "prefix", and
//     if found, proceeds to read it as a V2 checkpoint;
14728//   - Otherwise the V1 read path is invoked.
14729// Relying on this behavior is not recommended, as the ability to fall back to read
14730// V1 might be deprecated and eventually removed.
14731//
14732// By default, restores the named tensors in full.  If the caller wishes to restore
14733// specific slices of stored tensors, "shape_and_slices" should be non-empty
14734// strings and correspondingly well-formed.
14735//
14736// Callers must ensure all the named tensors are indeed stored in the checkpoint.
14737//
14738// Arguments:
14739//	prefix: Must have a single element.  The prefix of a V2 checkpoint.
14740//	tensor_names: shape {N}.  The names of the tensors to be restored.
14741//	shape_and_slices: shape {N}.  The slice specs of the tensors to be restored.
14742// Empty strings indicate that they are non-partitioned tensors.
14743//	dtypes: shape {N}.  The list of expected dtype for the tensors.  Must match
14744// those stored in the checkpoint.
14745//
14746// Returns shape {N}.  The restored tensors, whose shapes are read from the
14747// checkpoint directly.
14748func RestoreV2(scope *Scope, prefix tf.Output, tensor_names tf.Output, shape_and_slices tf.Output, dtypes []tf.DataType) (tensors []tf.Output) {
14749	if scope.Err() != nil {
14750		return
14751	}
14752	attrs := map[string]interface{}{"dtypes": dtypes}
14753	opspec := tf.OpSpec{
14754		Type: "RestoreV2",
14755		Input: []tf.Input{
14756			prefix, tensor_names, shape_and_slices,
14757		},
14758		Attrs: attrs,
14759	}
14760	op := scope.AddOperation(opspec)
14761	if scope.Err() != nil {
14762		return
14763	}
14764	var idx int
14765	var err error
14766	if tensors, idx, err = makeOutputList(op, idx, "tensors"); err != nil {
14767		scope.UpdateErr("RestoreV2", err)
14768		return
14769	}
14770	return tensors
14771}
14772
14773// Delete the TensorArray from its resource container.
14774//
14775// This enables the user to close and release the resource in the middle
14776// of a step/run.
14777//
14778// Arguments:
14779//	handle: The handle to a TensorArray (output of TensorArray or TensorArrayGrad).
14780//
14781// Returns the created operation.
14782func TensorArrayCloseV3(scope *Scope, handle tf.Output) (o *tf.Operation) {
14783	if scope.Err() != nil {
14784		return
14785	}
14786	opspec := tf.OpSpec{
14787		Type: "TensorArrayCloseV3",
14788		Input: []tf.Input{
14789			handle,
14790		},
14791	}
14792	return scope.AddOperation(opspec)
14793}
14794
14795// Saves tensors in V2 checkpoint format.
14796//
14797// By default, saves the named tensors in full.  If the caller wishes to save
14798// specific slices of full tensors, "shape_and_slices" should be non-empty strings
14799// and correspondingly well-formed.
14800//
14801// Arguments:
14802//	prefix: Must have a single element. The prefix of the V2 checkpoint to which we
14803// write the tensors.
14804//	tensor_names: shape {N}. The names of the tensors to be saved.
14805//	shape_and_slices: shape {N}.  The slice specs of the tensors to be saved.
14806// Empty strings indicate that they are non-partitioned tensors.
14807//	tensors: `N` tensors to save.
14808//
14809// Returns the created operation.
14810func SaveV2(scope *Scope, prefix tf.Output, tensor_names tf.Output, shape_and_slices tf.Output, tensors []tf.Output) (o *tf.Operation) {
14811	if scope.Err() != nil {
14812		return
14813	}
14814	opspec := tf.OpSpec{
14815		Type: "SaveV2",
14816		Input: []tf.Input{
14817			prefix, tensor_names, shape_and_slices, tf.OutputList(tensors),
14818		},
14819	}
14820	return scope.AddOperation(opspec)
14821}
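
// Illustrative sketch (hypothetical prefix and tensor names): a V2 save paired
// with a matching restore via RestoreV2 above. Empty strings in
// shape_and_slices mean the tensors are saved and restored whole.
//
// ```go
// s := op.NewScope()
// prefix := op.Const(s, "/tmp/ckpt/model") // hypothetical checkpoint prefix
// names := op.Const(s, []string{"w", "b"})
// specs := op.Const(s, []string{"", ""}) // non-partitioned tensors
// w := op.Const(s, []float32{1, 2, 3})
// b := op.Const(s, []float32{0})
// _ = op.SaveV2(s, prefix, names, specs, []tf.Output{w, b})
// restored := op.RestoreV2(s, prefix, names, specs, []tf.DataType{tf.Float, tf.Float})
// _ = restored
// ```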
14822
14823// SparseCountSparseOutputAttr is an optional argument to SparseCountSparseOutput.
14824type SparseCountSparseOutputAttr func(optionalAttr)
14825
14826// SparseCountSparseOutputMinlength sets the optional minlength attribute to value.
14827//
14828// value: Minimum value to count. Can be set to -1 for no minimum.
14829// If not specified, defaults to -1
14830//
14831// REQUIRES: value >= -1
14832func SparseCountSparseOutputMinlength(value int64) SparseCountSparseOutputAttr {
14833	return func(m optionalAttr) {
14834		m["minlength"] = value
14835	}
14836}
14837
14838// SparseCountSparseOutputMaxlength sets the optional maxlength attribute to value.
14839//
14840// value: Maximum value to count. Can be set to -1 for no maximum.
14841// If not specified, defaults to -1
14842//
14843// REQUIRES: value >= -1
14844func SparseCountSparseOutputMaxlength(value int64) SparseCountSparseOutputAttr {
14845	return func(m optionalAttr) {
14846		m["maxlength"] = value
14847	}
14848}
14849
14850// Performs sparse-output bin counting for a sparse tensor input.
14851//
14852//   Counts the number of times each value occurs in the input.
14853//
14854// Arguments:
14855//	indices: Tensor containing the indices of the sparse tensor to count.
14856//	values: Tensor containing values of the sparse tensor to count.
14857//	dense_shape: Tensor containing the dense shape of the sparse tensor to count.
14858//	weights: A Tensor of the same shape as indices containing per-index weight values.
14859// May also be the empty tensor if no weights are used.
14860//	binary_output: Whether to output the number of occurrences of each value or 1.
14861//
14862// Returns:
14863//	output_indices: Indices tensor for the resulting sparse tensor object.
14864//	output_values: Values tensor for the resulting sparse tensor object.
14865//	output_dense_shape: Shape tensor for the resulting sparse tensor object.
14866func SparseCountSparseOutput(scope *Scope, indices tf.Output, values tf.Output, dense_shape tf.Output, weights tf.Output, binary_output bool, optional ...SparseCountSparseOutputAttr) (output_indices tf.Output, output_values tf.Output, output_dense_shape tf.Output) {
14867	if scope.Err() != nil {
14868		return
14869	}
14870	attrs := map[string]interface{}{"binary_output": binary_output}
14871	for _, a := range optional {
14872		a(attrs)
14873	}
14874	opspec := tf.OpSpec{
14875		Type: "SparseCountSparseOutput",
14876		Input: []tf.Input{
14877			indices, values, dense_shape, weights,
14878		},
14879		Attrs: attrs,
14880	}
14881	op := scope.AddOperation(opspec)
14882	return op.Output(0), op.Output(1), op.Output(2)
14883}
14884
14885// DebugNumericSummaryV2Attr is an optional argument to DebugNumericSummaryV2.
14886type DebugNumericSummaryV2Attr func(optionalAttr)
14887
14888// DebugNumericSummaryV2OutputDtype sets the optional output_dtype attribute to value.
14889//
14890// value: Optional. The type of the output. Can be float32 or float64 (default: float32).
14891// If not specified, defaults to DT_FLOAT
14892func DebugNumericSummaryV2OutputDtype(value tf.DataType) DebugNumericSummaryV2Attr {
14893	return func(m optionalAttr) {
14894		m["output_dtype"] = value
14895	}
14896}
14897
14898// DebugNumericSummaryV2TensorDebugMode sets the optional tensor_debug_mode attribute to value.
14899//
14900// value: Tensor debug mode: the mode in which the input tensor is summarized
14901//   by the op. See the TensorDebugMode enum in
14902//   tensorflow/core/protobuf/debug_event.proto for details.
14903//
14904// Supported values:
14905//   2 (CURT_HEALTH): Output a float32/64 tensor of shape [2]. The 1st
14906//   element is the tensor_id, if provided, and -1 otherwise. The 2nd
14907//   element is a bit which is set to 1 if the input tensor has an
14908//   infinity or nan value, or zero otherwise.
14909//
14910//   3 (CONCISE_HEALTH): Output a float32/64 tensor of shape [5]. The 1st
14911//   element is the tensor_id, if provided, and -1 otherwise. The
14912//   remaining four slots are the total number of elements, -infs,
14913//   +infs, and nans in the input tensor respectively.
14914//
14915//   4 (FULL_HEALTH): Output a float32/64 tensor of shape [11]. The 1st
14916//   element is the tensor_id, if provided, and -1 otherwise. The 2nd
14917//   element is the device_id, if provided, and -1 otherwise. The 3rd
//   element holds the datatype value of the input tensor according
14919//   to the enumerated type in tensorflow/core/framework/types.proto.
14920//   The remaining elements hold the total number of elements, -infs,
14921//   +infs, nans, negative finite numbers, zeros, and positive finite
14922//   numbers in the input tensor respectively.
14923//
14924//   5 (SHAPE): Output a float32/64 tensor of shape [10]. The 1st
14925//   element is the tensor_id, if provided, and -1 otherwise. The 2nd
//   element holds the datatype value of the input tensor according
14927//   to the enumerated type in tensorflow/core/framework/types.proto.
14928//   The 3rd element holds the rank of the tensor. The 4th element holds
14929//   the number of elements within the tensor. Finally the remaining 6
14930//   elements hold the shape of the tensor. If the rank of the tensor
14931//   is lower than 6, the shape is right padded with zeros. If the rank
14932//   is greater than 6, the head of the shape is truncated.
14933//
14934//   6 (FULL_NUMERICS): Output a float32/64 tensor of shape [22]. The 1st
14935//   element is the tensor_id, if provided, and -1 otherwise. The 2nd
14936//   element is the device_id, if provided, and -1 otherwise. The 3rd
//   element holds the datatype value of the input tensor according
14938//   to the enumerated type in tensorflow/core/framework/types.proto.
14939//   The 4th element holds the rank of the tensor. The 5th to 11th
14940//   elements hold the shape of the tensor. If the rank of the tensor
14941//   is lower than 6, the shape is right padded with zeros. If the rank
14942//   is greater than 6, the head of the shape is truncated. The 12th to
14943//   18th elements hold the number of elements, -infs, +infs, nans,
14944//   denormal floats, negative finite numbers, zeros, and positive
14945//   finite numbers in the input tensor respectively. The final four
14946//   elements hold the min value, max value, mean, and variance of the
14947//   input tensor.
14948//
14949//   8 (REDUCE_INF_NAN_THREE_SLOTS): Output a float32/64 tensor of shape
//   [3]. The 1st element is -inf if any element of the input tensor
//   is -inf, or zero otherwise. The 2nd element is +inf if any element
//   of the input tensor is +inf, or zero otherwise.  The 3rd element is
14953//   nan if any element of the input tensor is nan, or zero otherwise.
14954// If not specified, defaults to -1
14955func DebugNumericSummaryV2TensorDebugMode(value int64) DebugNumericSummaryV2Attr {
14956	return func(m optionalAttr) {
14957		m["tensor_debug_mode"] = value
14958	}
14959}
14960
14961// DebugNumericSummaryV2TensorId sets the optional tensor_id attribute to value.
14962//
14963// value: Optional. An integer identifier for the tensor being summarized by this op.
14964// If not specified, defaults to -1
14965func DebugNumericSummaryV2TensorId(value int64) DebugNumericSummaryV2Attr {
14966	return func(m optionalAttr) {
14967		m["tensor_id"] = value
14968	}
14969}
14970
14971// Debug Numeric Summary V2 Op.
14972//
14973// Computes a numeric summary of the input tensor. The shape of the output
14974// depends on the tensor_debug_mode attribute.
14975// This op is used internally by TensorFlow Debugger (tfdbg) v2.
14976//
14977// Arguments:
14978//	input: Input tensor, to be summarized by the op.
14979func DebugNumericSummaryV2(scope *Scope, input tf.Output, optional ...DebugNumericSummaryV2Attr) (output tf.Output) {
14980	if scope.Err() != nil {
14981		return
14982	}
14983	attrs := map[string]interface{}{}
14984	for _, a := range optional {
14985		a(attrs)
14986	}
14987	opspec := tf.OpSpec{
14988		Type: "DebugNumericSummaryV2",
14989		Input: []tf.Input{
14990			input,
14991		},
14992		Attrs: attrs,
14993	}
14994	op := scope.AddOperation(opspec)
14995	return op.Output(0)
14996}
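
// Illustrative sketch (hypothetical values): summarizing a tensor with
// tensor_debug_mode 2 (CURT_HEALTH), which yields a float tensor of shape [2]
// whose 2nd element is 1 if the input contains an infinity or nan.
//
// ```go
// s := op.NewScope()
// x := op.Const(s, []float32{1, 2, float32(math.Inf(1))}) // needs "math"
// summary := op.DebugNumericSummaryV2(s, x,
// 	op.DebugNumericSummaryV2TensorDebugMode(2),
// 	op.DebugNumericSummaryV2TensorId(7))
// _ = summary // element [1] is 1 because the input contains +inf
// ```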
14997
// Returns the cardinality of `input_dataset`.
15001//
15002// Arguments:
15003//	input_dataset: A variant tensor representing the dataset to return cardinality for.
15004//
15005// Returns The cardinality of `input_dataset`. Named constants are used to represent
15006// infinite and unknown cardinality.
15007func DatasetCardinality(scope *Scope, input_dataset tf.Output) (cardinality tf.Output) {
15008	if scope.Err() != nil {
15009		return
15010	}
15011	opspec := tf.OpSpec{
15012		Type: "DatasetCardinality",
15013		Input: []tf.Input{
15014			input_dataset,
15015		},
15016	}
15017	op := scope.AddOperation(opspec)
15018	return op.Output(0)
15019}
15020
// Calculates the product with a tridiagonal matrix.
//
// Calculates the product of two matrices, where the left matrix is a tridiagonal
// matrix.
15024//
15025// Arguments:
15026//	superdiag: Tensor of shape `[..., 1, M]`, representing superdiagonals of
15027// tri-diagonal matrices to the left of multiplication. Last element is ignored.
15028//	maindiag: Tensor of shape `[..., 1, M]`, representing main diagonals of tri-diagonal
15029// matrices to the left of multiplication.
15030//	subdiag: Tensor of shape `[..., 1, M]`, representing subdiagonals of tri-diagonal
15031// matrices to the left of multiplication. First element is ignored.
15032//	rhs: Tensor of shape `[..., M, N]`, representing MxN matrices to the right of
15033// multiplication.
15034//
15035// Returns Tensor of shape `[..., M, N]` containing the product.
15036func TridiagonalMatMul(scope *Scope, superdiag tf.Output, maindiag tf.Output, subdiag tf.Output, rhs tf.Output) (output tf.Output) {
15037	if scope.Err() != nil {
15038		return
15039	}
15040	opspec := tf.OpSpec{
15041		Type: "TridiagonalMatMul",
15042		Input: []tf.Input{
15043			superdiag, maindiag, subdiag, rhs,
15044		},
15045	}
15046	op := scope.AddOperation(opspec)
15047	return op.Output(0)
15048}
15049
15050// CollectiveBcastRecvAttr is an optional argument to CollectiveBcastRecv.
15051type CollectiveBcastRecvAttr func(optionalAttr)
15052
15053// CollectiveBcastRecvCommunicationHint sets the optional communication_hint attribute to value.
15054// If not specified, defaults to "auto"
15055func CollectiveBcastRecvCommunicationHint(value string) CollectiveBcastRecvAttr {
15056	return func(m optionalAttr) {
15057		m["communication_hint"] = value
15058	}
15059}
15060
15061// CollectiveBcastRecvTimeoutSeconds sets the optional timeout_seconds attribute to value.
15062// If not specified, defaults to 0
15063func CollectiveBcastRecvTimeoutSeconds(value float32) CollectiveBcastRecvAttr {
15064	return func(m optionalAttr) {
15065		m["timeout_seconds"] = value
15066	}
15067}
15068
15069// Receives a tensor value broadcast from another device.
15070func CollectiveBcastRecv(scope *Scope, T tf.DataType, group_size int64, group_key int64, instance_key int64, shape tf.Shape, optional ...CollectiveBcastRecvAttr) (data tf.Output) {
15071	if scope.Err() != nil {
15072		return
15073	}
15074	attrs := map[string]interface{}{"T": T, "group_size": group_size, "group_key": group_key, "instance_key": instance_key, "shape": shape}
15075	for _, a := range optional {
15076		a(attrs)
15077	}
15078	opspec := tf.OpSpec{
15079		Type: "CollectiveBcastRecv",
15080
15081		Attrs: attrs,
15082	}
15083	op := scope.AddOperation(opspec)
15084	return op.Output(0)
15085}
15086
15087// Scatter the data from the input value into specific TensorArray elements.
15088//
// `indices` must be a vector, and its length must match the first dim of `value`.
15090//
15091// Arguments:
15092//	handle: The handle to a TensorArray.
15093//	indices: The locations at which to write the tensor elements.
15094//	value: The concatenated tensor to write to the TensorArray.
15095//	flow_in: A float scalar that enforces proper chaining of operations.
15096//
15097// Returns A float scalar that enforces proper chaining of operations.
15098func TensorArrayScatterV3(scope *Scope, handle tf.Output, indices tf.Output, value tf.Output, flow_in tf.Output) (flow_out tf.Output) {
15099	if scope.Err() != nil {
15100		return
15101	}
15102	opspec := tf.OpSpec{
15103		Type: "TensorArrayScatterV3",
15104		Input: []tf.Input{
15105			handle, indices, value, flow_in,
15106		},
15107	}
15108	op := scope.AddOperation(opspec)
15109	return op.Output(0)
15110}
15111
15112// Computes the matrix square root of one or more square matrices:
15113//
15114// matmul(sqrtm(A), sqrtm(A)) = A
15115//
15116// The input matrix should be invertible. If the input matrix is real, it should
15117// have no eigenvalues which are real and negative (pairs of complex conjugate
15118// eigenvalues are allowed).
15119//
15120// The matrix square root is computed by first reducing the matrix to
15121// quasi-triangular form with the real Schur decomposition. The square root
15122// of the quasi-triangular matrix is then computed directly. Details of
15123// the algorithm can be found in: Nicholas J. Higham, "Computing real
15124// square roots of a real matrix", Linear Algebra Appl., 1987.
15125//
15126// The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
15127// form square matrices. The output is a tensor of the same shape as the input
15128// containing the matrix square root for all input submatrices `[..., :, :]`.
15129//
15130// Arguments:
15131//	input: Shape is `[..., M, M]`.
15132//
15133// Returns Shape is `[..., M, M]`.
15134//
15135// @compatibility(scipy)
15136// Equivalent to scipy.linalg.sqrtm
15137// @end_compatibility
15138func MatrixSquareRoot(scope *Scope, input tf.Output) (output tf.Output) {
15139	if scope.Err() != nil {
15140		return
15141	}
15142	opspec := tf.OpSpec{
15143		Type: "MatrixSquareRoot",
15144		Input: []tf.Input{
15145			input,
15146		},
15147	}
15148	op := scope.AddOperation(opspec)
15149	return op.Output(0)
15150}
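
// Illustrative sketch (hypothetical values): the square root of a diagonal
// 2x2 matrix, which satisfies matmul(sqrtm(A), sqrtm(A)) = A.
//
// ```go
// s := op.NewScope()
// a := op.Const(s, [][]float32{{4, 0}, {0, 9}})
// r := op.MatrixSquareRoot(s, a) // [[2, 0], [0, 3]]
// _ = r
// ```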
15151
15152// Pads a tensor with mirrored values.
15153//
15154// This operation pads a `input` with mirrored values according to the `paddings`
15155// you specify. `paddings` is an integer tensor with shape `[n, 2]`, where n is
15156// the rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates
15157// how many values to add before the contents of `input` in that dimension, and
15158// `paddings[D, 1]` indicates how many values to add after the contents of `input`
15159// in that dimension. Both `paddings[D, 0]` and `paddings[D, 1]` must be no greater
15160// than `input.dim_size(D)` (or `input.dim_size(D) - 1`) if `copy_border` is true
15161// (if false, respectively).
15162//
15163// The padded size of each dimension D of the output is:
15164//
15165// `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)`
15166//
15167// For example:
15168//
15169// ```
15170// # 't' is [[1, 2, 3], [4, 5, 6]].
// # 'paddings' is [[1, 1], [2, 2]].
15172// # 'mode' is SYMMETRIC.
15173// # rank of 't' is 2.
15174// pad(t, paddings) ==> [[2, 1, 1, 2, 3, 3, 2]
15175//                       [2, 1, 1, 2, 3, 3, 2]
15176//                       [5, 4, 4, 5, 6, 6, 5]
15177//                       [5, 4, 4, 5, 6, 6, 5]]
15178// ```
15179//
15180// Arguments:
15181//	input: The input tensor to be padded.
15182//	paddings: A two-column matrix specifying the padding sizes. The number of
15183// rows must be the same as the rank of `input`.
15184//	mode: Either `REFLECT` or `SYMMETRIC`. In reflect mode the padded regions
15185// do not include the borders, while in symmetric mode the padded regions
15186// do include the borders. For example, if `input` is `[1, 2, 3]` and `paddings`
15187// is `[0, 2]`, then the output is `[1, 2, 3, 2, 1]` in reflect mode, and
15188// it is `[1, 2, 3, 3, 2]` in symmetric mode.
15189//
15190// Returns The padded tensor.
15191func MirrorPad(scope *Scope, input tf.Output, paddings tf.Output, mode string) (output tf.Output) {
15192	if scope.Err() != nil {
15193		return
15194	}
15195	attrs := map[string]interface{}{"mode": mode}
15196	opspec := tf.OpSpec{
15197		Type: "MirrorPad",
15198		Input: []tf.Input{
15199			input, paddings,
15200		},
15201		Attrs: attrs,
15202	}
15203	op := scope.AddOperation(opspec)
15204	return op.Output(0)
15205}
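
// Illustrative sketch of the example above using this package:
//
// ```go
// s := op.NewScope()
// t := op.Const(s, [][]int32{{1, 2, 3}, {4, 5, 6}})
// paddings := op.Const(s, [][]int32{{1, 1}, {2, 2}})
// padded := op.MirrorPad(s, t, paddings, "SYMMETRIC") // shape [4, 7]
// _ = padded
// ```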
15206
15207// TensorArrayV3Attr is an optional argument to TensorArrayV3.
15208type TensorArrayV3Attr func(optionalAttr)
15209
15210// TensorArrayV3ElementShape sets the optional element_shape attribute to value.
15211//
15212// value: The expected shape of an element, if known. Used to
15213// validate the shapes of TensorArray elements. If this shape is not
15214// fully specified, gathering zero-size TensorArrays is an error.
15215// If not specified, defaults to <unknown_rank:true >
15216func TensorArrayV3ElementShape(value tf.Shape) TensorArrayV3Attr {
15217	return func(m optionalAttr) {
15218		m["element_shape"] = value
15219	}
15220}
15221
15222// TensorArrayV3DynamicSize sets the optional dynamic_size attribute to value.
15223//
15224// value: A boolean that determines whether writes to the TensorArray
15225// are allowed to grow the size.  By default, this is not allowed.
15226// If not specified, defaults to false
15227func TensorArrayV3DynamicSize(value bool) TensorArrayV3Attr {
15228	return func(m optionalAttr) {
15229		m["dynamic_size"] = value
15230	}
15231}
15232
15233// TensorArrayV3ClearAfterRead sets the optional clear_after_read attribute to value.
15234//
15235// value: If true (default), Tensors in the TensorArray are cleared
15236// after being read.  This disables multiple read semantics but allows early
15237// release of memory.
15238// If not specified, defaults to true
15239func TensorArrayV3ClearAfterRead(value bool) TensorArrayV3Attr {
15240	return func(m optionalAttr) {
15241		m["clear_after_read"] = value
15242	}
15243}
15244
15245// TensorArrayV3IdenticalElementShapes sets the optional identical_element_shapes attribute to value.
15246//
15247// value: If true (default is false), then all
15248// elements in the TensorArray will be expected to have identical shapes.
15249// This allows certain behaviors, like dynamically checking for
15250// consistent shapes on write, and being able to fill in properly
15251// shaped zero tensors on stack -- even if the element_shape attribute
15252// is not fully defined.
15253// If not specified, defaults to false
15254func TensorArrayV3IdenticalElementShapes(value bool) TensorArrayV3Attr {
15255	return func(m optionalAttr) {
15256		m["identical_element_shapes"] = value
15257	}
15258}
15259
15260// TensorArrayV3TensorArrayName sets the optional tensor_array_name attribute to value.
15261//
15262// value: Overrides the name used for the temporary tensor_array
15263// resource. Default value is the name of the 'TensorArray' op (which
15264// is guaranteed unique).
15265// If not specified, defaults to ""
15266func TensorArrayV3TensorArrayName(value string) TensorArrayV3Attr {
15267	return func(m optionalAttr) {
15268		m["tensor_array_name"] = value
15269	}
15270}
15271
15272// An array of Tensors of given size.
15273//
15274// Write data via Write and read via Read or Pack.
15275//
15276// Arguments:
15277//	size: The size of the array.
15278//	dtype: The type of the elements on the tensor_array.
15279//
15280// Returns:
15281//	handle: The handle to the TensorArray.
15282//	flow: A scalar used to control gradient flow.
15283func TensorArrayV3(scope *Scope, size tf.Output, dtype tf.DataType, optional ...TensorArrayV3Attr) (handle tf.Output, flow tf.Output) {
15284	if scope.Err() != nil {
15285		return
15286	}
15287	attrs := map[string]interface{}{"dtype": dtype}
15288	for _, a := range optional {
15289		a(attrs)
15290	}
15291	opspec := tf.OpSpec{
15292		Type: "TensorArrayV3",
15293		Input: []tf.Input{
15294			size,
15295		},
15296		Attrs: attrs,
15297	}
15298	op := scope.AddOperation(opspec)
15299	return op.Output(0), op.Output(1)
15300}
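
// Illustrative sketch (hypothetical values): creating a TensorArray of two
// elements, scattering a tensor into it, and closing it. The flow output
// chains the operations.
//
// ```go
// s := op.NewScope()
// size := op.Const(s, int32(2))
// handle, flow := op.TensorArrayV3(s, size, tf.Float,
// 	op.TensorArrayV3ElementShape(tf.MakeShape(3)))
// value := op.Const(s, [][]float32{{1, 2, 3}, {4, 5, 6}})
// indices := op.Const(s, []int32{0, 1})
// _ = op.TensorArrayScatterV3(s, handle, indices, value, flow)
// _ = op.TensorArrayCloseV3(s, handle)
// ```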
15301
15302// MatrixSolveLsAttr is an optional argument to MatrixSolveLs.
15303type MatrixSolveLsAttr func(optionalAttr)
15304
15305// MatrixSolveLsFast sets the optional fast attribute to value.
15306// If not specified, defaults to true
15307func MatrixSolveLsFast(value bool) MatrixSolveLsAttr {
15308	return func(m optionalAttr) {
15309		m["fast"] = value
15310	}
15311}
15312
15313// Solves one or more linear least-squares problems.
15314//
15315// `matrix` is a tensor of shape `[..., M, N]` whose inner-most 2 dimensions
15316// form real or complex matrices of size `[M, N]`. `Rhs` is a tensor of the same
15317// type as `matrix` and shape `[..., M, K]`.
15318// The output is a tensor shape `[..., N, K]` where each output matrix solves
15319// each of the equations
15320// `matrix[..., :, :]` * `output[..., :, :]` = `rhs[..., :, :]`
15321// in the least squares sense.
15322//
15323// We use the following notation for (complex) matrix and right-hand sides
15324// in the batch:
15325//
15326// `matrix`=\\(A \in \mathbb{C}^{m \times n}\\),
15327// `rhs`=\\(B  \in \mathbb{C}^{m \times k}\\),
15328// `output`=\\(X  \in \mathbb{C}^{n \times k}\\),
15329// `l2_regularizer`=\\(\lambda \in \mathbb{R}\\).
15330//
15331// If `fast` is `True`, then the solution is computed by solving the normal
15332// equations using Cholesky decomposition. Specifically, if \\(m \ge n\\) then
15333// \\(X = (A^H A + \lambda I)^{-1} A^H B\\), which solves the least-squares
// problem \\(X = \mathrm{argmin}_{Z \in \mathbb{C}^{n \times k} } ||A Z - B||_F^2 + \lambda ||Z||_F^2\\).
15335// If \\(m \lt n\\) then `output` is computed as
15336// \\(X = A^H (A A^H + \lambda I)^{-1} B\\), which (for \\(\lambda = 0\\)) is the
15337// minimum-norm solution to the under-determined linear system, i.e.
15338// \\(X = \mathrm{argmin}_{Z \in \mathbb{C}^{n \times k} } ||Z||_F^2 \\),
15339// subject to \\(A Z = B\\). Notice that the fast path is only numerically stable
15340// when \\(A\\) is numerically full rank and has a condition number
15341// \\(\mathrm{cond}(A) \lt \frac{1}{\sqrt{\epsilon_{mach} } }\\) or \\(\lambda\\) is
15342// sufficiently large.
15343//
15344// If `fast` is `False` an algorithm based on the numerically robust complete
15345// orthogonal decomposition is used. This computes the minimum-norm
15346// least-squares solution, even when \\(A\\) is rank deficient. This path is
15347// typically 6-7 times slower than the fast path. If `fast` is `False` then
15348// `l2_regularizer` is ignored.
15349//
15350// Arguments:
15351//	matrix: Shape is `[..., M, N]`.
15352//	rhs: Shape is `[..., M, K]`.
15353//	l2_regularizer: Scalar tensor.
15354//
15355// @compatibility(numpy)
15356// Equivalent to np.linalg.lstsq
15357// @end_compatibility
15358//
15359// Returns Shape is `[..., N, K]`.
15360func MatrixSolveLs(scope *Scope, matrix tf.Output, rhs tf.Output, l2_regularizer tf.Output, optional ...MatrixSolveLsAttr) (output tf.Output) {
15361	if scope.Err() != nil {
15362		return
15363	}
15364	attrs := map[string]interface{}{}
15365	for _, a := range optional {
15366		a(attrs)
15367	}
15368	opspec := tf.OpSpec{
15369		Type: "MatrixSolveLs",
15370		Input: []tf.Input{
15371			matrix, rhs, l2_regularizer,
15372		},
15373		Attrs: attrs,
15374	}
15375	op := scope.AddOperation(opspec)
15376	return op.Output(0)
15377}
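
// Illustrative sketch (hypothetical values): a single 3x2 least-squares
// problem with a small ridge term (the fast Cholesky path is the default).
//
// ```go
// s := op.NewScope()
// a := op.Const(s, [][]float32{{1, 0}, {1, 1}, {1, 2}}) // [M, N] = [3, 2]
// b := op.Const(s, [][]float32{{1}, {2}, {4}})          // [M, K] = [3, 1]
// lambda := op.Const(s, float64(1e-3))                  // scalar l2_regularizer
// x := op.MatrixSolveLs(s, a, b, lambda) // shape [2, 1]
// _ = x
// ```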
15378
15379// MatrixTriangularSolveAttr is an optional argument to MatrixTriangularSolve.
15380type MatrixTriangularSolveAttr func(optionalAttr)
15381
15382// MatrixTriangularSolveLower sets the optional lower attribute to value.
15383//
15384// value: Boolean indicating whether the innermost matrices in `matrix` are
15385// lower or upper triangular.
15386// If not specified, defaults to true
15387func MatrixTriangularSolveLower(value bool) MatrixTriangularSolveAttr {
15388	return func(m optionalAttr) {
15389		m["lower"] = value
15390	}
15391}
15392
15393// MatrixTriangularSolveAdjoint sets the optional adjoint attribute to value.
15394//
15395// value: Boolean indicating whether to solve with `matrix` or its (block-wise)
15396//          adjoint.
15397//
// @compatibility(scipy)
15399// Equivalent to scipy.linalg.solve_triangular
15400// @end_compatibility
15401// If not specified, defaults to false
15402func MatrixTriangularSolveAdjoint(value bool) MatrixTriangularSolveAttr {
15403	return func(m optionalAttr) {
15404		m["adjoint"] = value
15405	}
15406}
15407
15408// Solves systems of linear equations with upper or lower triangular matrices by backsubstitution.
//
15411// `matrix` is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions form
15412// square matrices. If `lower` is `True` then the strictly upper triangular part
15413// of each inner-most matrix is assumed to be zero and not accessed.
15414// If `lower` is False then the strictly lower triangular part of each inner-most
15415// matrix is assumed to be zero and not accessed.
15416// `rhs` is a tensor of shape `[..., M, N]`.
15417//
// The output is a tensor of shape `[..., M, N]`. If `adjoint` is
// `False` then the innermost matrices in `output` satisfy matrix equations
// `matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]`.
// If `adjoint` is `True` then the innermost matrices in
// `output` satisfy matrix equations
// `adjoint(matrix[..., i, k]) * output[..., k, j] = rhs[..., i, j]`.
15424//
15425// Note, the batch shapes for the inputs only need to broadcast.
15426//
15427// Example:
15428// ```python
15429//
15430// a = tf.constant([[3,  0,  0,  0],
15431//                  [2,  1,  0,  0],
15432//                  [1,  0,  1,  0],
15433//                  [1,  1,  1,  1]], dtype=tf.float32)
15434//
15435// b = tf.constant([[4],
15436//                  [2],
15437//                  [4],
15438//                  [2]], dtype=tf.float32)
15439//
15440// x = tf.linalg.triangular_solve(a, b, lower=True)
15441// x
15442// # <tf.Tensor: shape=(4, 1), dtype=float32, numpy=
15443// # array([[ 1.3333334 ],
15444// #        [-0.66666675],
15445// #        [ 2.6666665 ],
15446// #        [-1.3333331 ]], dtype=float32)>
15447//
15448// # in python3 one can use `a@x`
15449// tf.matmul(a, x)
15450// # <tf.Tensor: shape=(4, 1), dtype=float32, numpy=
15451// # array([[4.       ],
15452// #        [2.       ],
15453// #        [4.       ],
15454// #        [1.9999999]], dtype=float32)>
15455// ```
15456//
15457// Arguments:
15458//	matrix: Shape is `[..., M, M]`.
15459//	rhs: Shape is `[..., M, K]`.
15460//
15461// Returns Shape is `[..., M, K]`.
15462func MatrixTriangularSolve(scope *Scope, matrix tf.Output, rhs tf.Output, optional ...MatrixTriangularSolveAttr) (output tf.Output) {
15463	if scope.Err() != nil {
15464		return
15465	}
15466	attrs := map[string]interface{}{}
15467	for _, a := range optional {
15468		a(attrs)
15469	}
15470	opspec := tf.OpSpec{
15471		Type: "MatrixTriangularSolve",
15472		Input: []tf.Input{
15473			matrix, rhs,
15474		},
15475		Attrs: attrs,
15476	}
15477	op := scope.AddOperation(opspec)
15478	return op.Output(0)
15479}
15480
15481// Applies sparse addition to `input` using individual values or slices
15482//
15483// from `updates` according to indices `indices`.  The updates are non-aliasing:
15484// `input` is only modified in-place if no other operations will use it.
15485// Otherwise, a copy of `input` is made.  This operation has a gradient with
15486// respect to both `input` and `updates`.
15487//
15488// `input` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
15489//
// `indices` must be an integer tensor containing indices into `input`.
15491// It must be shape \\([d_0, ..., d_{Q-2}, K]\\) where `0 < K <= P`.
15492//
15493// The innermost dimension of `indices` (with length `K`) corresponds to
15494// indices into elements (if `K = P`) or `(P-K)`-dimensional slices
15495// (if `K < P`) along the `K`th dimension of `input`.
15496//
15497// `updates` is `Tensor` of rank `Q-1+P-K` with shape:
15498//
15499// $$[d_0, ..., d_{Q-2}, input.shape[K], ..., input.shape[P-1]].$$
15500//
// For example, say we want to add 4 scattered elements to a rank-1 tensor with 8
// elements. In Python, that addition would look like this:
15503//
15504//     input = tf.constant([1, 2, 3, 4, 5, 6, 7, 8])
15505//     indices = tf.constant([[4], [3], [1], [7]])
15506//     updates = tf.constant([9, 10, 11, 12])
15507//     output = tf.scatter_nd_non_aliasing_add(input, indices, updates)
15508//     with tf.Session() as sess:
15509//       print(sess.run(output))
15510//
15511// The resulting value `output` would look like this:
15512//
15513//     [1, 13, 3, 14, 14, 6, 7, 20]
15514//
15515// See `tf.scatter_nd` for more details about how to make updates to slices.
15516//
15517// Arguments:
15518//	input: A Tensor.
15519//	indices: A Tensor. Must be one of the following types: `int32`, `int64`.
15520// A tensor of indices into `input`.
15521//	updates: A Tensor. Must have the same type as ref. A tensor of updated values
15522// to add to `input`.
15523//
15524// Returns A `Tensor` with the same shape as `input`, containing values of `input`
15525// updated with `updates`.
15526func ScatterNdNonAliasingAdd(scope *Scope, input tf.Output, indices tf.Output, updates tf.Output) (output tf.Output) {
15527	if scope.Err() != nil {
15528		return
15529	}
15530	opspec := tf.OpSpec{
15531		Type: "ScatterNdNonAliasingAdd",
15532		Input: []tf.Input{
15533			input, indices, updates,
15534		},
15535	}
15536	op := scope.AddOperation(opspec)
15537	return op.Output(0)
15538}
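
// Illustrative sketch of the example above using this package:
//
// ```go
// s := op.NewScope()
// input := op.Const(s, []int32{1, 2, 3, 4, 5, 6, 7, 8})
// indices := op.Const(s, [][]int32{{4}, {3}, {1}, {7}})
// updates := op.Const(s, []int32{9, 10, 11, 12})
// out := op.ScatterNdNonAliasingAdd(s, input, indices, updates)
// _ = out // evaluates to [1, 13, 3, 14, 14, 6, 7, 20]
// ```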
15539
15540// LuAttr is an optional argument to Lu.
15541type LuAttr func(optionalAttr)
15542
15543// LuOutputIdxType sets the optional output_idx_type attribute to value.
15544// If not specified, defaults to DT_INT32
15545func LuOutputIdxType(value tf.DataType) LuAttr {
15546	return func(m optionalAttr) {
15547		m["output_idx_type"] = value
15548	}
15549}
15550
15551// Computes the LU decomposition of one or more square matrices.
15552//
15553// The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
15554// form square matrices.
15555//
15556// The input has to be invertible.
15557//
15558// The output consists of two tensors LU and P containing the LU decomposition
15559// of all input submatrices `[..., :, :]`. LU encodes the lower triangular and
15560// upper triangular factors.
15561//
15562// For each input submatrix of shape `[M, M]`, L is a lower triangular matrix of
15563// shape `[M, M]` with unit diagonal whose entries correspond to the strictly lower
// triangular part of LU. U is an upper triangular matrix of shape `[M, M]` whose
15565// entries correspond to the upper triangular part, including the diagonal, of LU.
15566//
15567// P represents a permutation matrix encoded as a list of indices each between `0`
15568// and `M-1`, inclusive. If P_mat denotes the permutation matrix corresponding to
// P, then L, U and P satisfy P_mat * input = L * U.
15570//
15571// Arguments:
15572//	input: A tensor of shape `[..., M, M]` whose inner-most 2 dimensions form matrices of
15573// size `[M, M]`.
15574//
15575// Returns:
15576//	lu: A tensor of shape `[..., M, M]` whose strictly lower triangular part denotes the
15577// lower triangular factor `L` with unit diagonal, and whose upper triangular part
15578// denotes the upper triangular factor `U`.
15579//	p: Permutation of the rows encoded as a list of indices in `0..M-1`. Shape is
15580// `[..., M]`.
15581// @compatibility(scipy)
15582// Similar to `scipy.linalg.lu`, except the triangular factors `L` and `U` are
15583// packed into a single tensor, the permutation is applied to `input` instead of
15584// the right hand side and the permutation `P` is returned as a list of indices
15585// instead of a permutation matrix.
15586// @end_compatibility
15587func Lu(scope *Scope, input tf.Output, optional ...LuAttr) (lu tf.Output, p tf.Output) {
15588	if scope.Err() != nil {
15589		return
15590	}
15591	attrs := map[string]interface{}{}
15592	for _, a := range optional {
15593		a(attrs)
15594	}
15595	opspec := tf.OpSpec{
15596		Type: "Lu",
15597		Input: []tf.Input{
15598			input,
15599		},
15600		Attrs: attrs,
15601	}
15602	op := scope.AddOperation(opspec)
15603	return op.Output(0), op.Output(1)
15604}
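
// Illustrative sketch (hypothetical values): decomposing a single 2x2 matrix.
//
// ```go
// s := op.NewScope()
// a := op.Const(s, [][]float32{{4, 3}, {6, 3}})
// lu, p := op.Lu(s, a) // lu packs L (unit diagonal) and U; p has shape [2]
// _, _ = lu, p
// ```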
15605
15606// SelfAdjointEigV2Attr is an optional argument to SelfAdjointEigV2.
15607type SelfAdjointEigV2Attr func(optionalAttr)
15608
15609// SelfAdjointEigV2ComputeV sets the optional compute_v attribute to value.
15610//
15611// value: If `True` then eigenvectors will be computed and returned in `v`.
15612// Otherwise, only the eigenvalues will be computed.
15613// If not specified, defaults to true
15614func SelfAdjointEigV2ComputeV(value bool) SelfAdjointEigV2Attr {
15615	return func(m optionalAttr) {
15616		m["compute_v"] = value
15617	}
15618}
15619
15620// Computes the eigen decomposition of one or more square self-adjoint matrices.
15621//
15622// Computes the eigenvalues and (optionally) eigenvectors of each inner matrix in
15623// `input` such that `input[..., :, :] = v[..., :, :] * diag(e[..., :])`. The eigenvalues
15624// are sorted in non-decreasing order.
15625//
15626// ```python
15627// # a is a tensor.
15628// # e is a tensor of eigenvalues.
15629// # v is a tensor of eigenvectors.
15630// e, v = self_adjoint_eig(a)
15631// e = self_adjoint_eig(a, compute_v=False)
15632// ```
15633//
15634// Arguments:
15635//	input: `Tensor` input of shape `[N, N]`.
15636//
15637// Returns:
15638//	e: Eigenvalues. Shape is `[N]`.
15639//	v: Eigenvectors. Shape is `[N, N]`.
15640func SelfAdjointEigV2(scope *Scope, input tf.Output, optional ...SelfAdjointEigV2Attr) (e tf.Output, v tf.Output) {
15641	if scope.Err() != nil {
15642		return
15643	}
15644	attrs := map[string]interface{}{}
15645	for _, a := range optional {
15646		a(attrs)
15647	}
15648	opspec := tf.OpSpec{
15649		Type: "SelfAdjointEigV2",
15650		Input: []tf.Input{
15651			input,
15652		},
15653		Attrs: attrs,
15654	}
15655	op := scope.AddOperation(opspec)
15656	return op.Output(0), op.Output(1)
15657}
15658
15659// Computes the Eigen Decomposition of a batch of square self-adjoint matrices.
15660//
15661// DEPRECATED at GraphDef version 11: Use SelfAdjointEigV2 instead.
15662//
15663// The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
15664// form square matrices, with the same constraints as the single matrix
15665// SelfAdjointEig.
15666//
15667// The result is a [..., M+1, M] matrix with [..., 0,:] containing the
15668// eigenvalues, and subsequent [...,1:, :] containing the eigenvectors. The eigenvalues
15669// are sorted in non-decreasing order.
15670//
15671// Arguments:
15672//	input: Shape is `[..., M, M]`.
15673//
15674// Returns Shape is `[..., M+1, M]`.
15675func SelfAdjointEig(scope *Scope, input tf.Output) (output tf.Output) {
15676	if scope.Err() != nil {
15677		return
15678	}
15679	opspec := tf.OpSpec{
15680		Type: "SelfAdjointEig",
15681		Input: []tf.Input{
15682			input,
15683		},
15684	}
15685	op := scope.AddOperation(opspec)
15686	return op.Output(0)
15687}
15688
15689// Computes the reverse mode backpropagated gradient of the Cholesky algorithm.
15690//
15691// For an explanation see "Differentiation of the Cholesky algorithm" by
15692// Iain Murray http://arxiv.org/abs/1602.07527.
15693//
15694// Arguments:
15695//	l: Output of batch Cholesky algorithm l = cholesky(A). Shape is `[..., M, M]`.
15696// Algorithm depends only on lower triangular part of the innermost matrices of
15697// this tensor.
15698//	grad: df/dl where f is some scalar function. Shape is `[..., M, M]`.
15699// Algorithm depends only on lower triangular part of the innermost matrices of
15700// this tensor.
15701//
// Returns Symmetrized version of df/dA. Shape is `[..., M, M]`.
15703func CholeskyGrad(scope *Scope, l tf.Output, grad tf.Output) (output tf.Output) {
15704	if scope.Err() != nil {
15705		return
15706	}
15707	opspec := tf.OpSpec{
15708		Type: "CholeskyGrad",
15709		Input: []tf.Input{
15710			l, grad,
15711		},
15712	}
15713	op := scope.AddOperation(opspec)
15714	return op.Output(0)
15715}
15716
15717// Deprecated, use python implementation tf.linalg.matrix_exponential.
15718//
15719// DEPRECATED at GraphDef version 27: Use Python implementation tf.linalg.matrix_exponential instead.
15720func MatrixExponential(scope *Scope, input tf.Output) (output tf.Output) {
15721	if scope.Err() != nil {
15722		return
15723	}
15724	opspec := tf.OpSpec{
15725		Type: "MatrixExponential",
15726		Input: []tf.Input{
15727			input,
15728		},
15729	}
15730	op := scope.AddOperation(opspec)
15731	return op.Output(0)
15732}
15733
15734// Creates a dataset that emits the key-value pairs in one or more LMDB files.
15735//
15736// The Lightning Memory-Mapped Database Manager, or LMDB, is an embedded binary
15737// key-value database. This dataset can read the contents of LMDB database files,
15738// the names of which generally have the `.mdb` suffix.
15739//
15740// Each output element consists of a key-value pair represented as a pair of
15741// scalar string `Tensor`s, where the first `Tensor` contains the key and the
15742// second `Tensor` contains the value.
15743//
15744// LMDB uses different file formats on big- and little-endian machines.
15745// `LMDBDataset` can only read files in the format of the host machine.
15746//
15747// Arguments:
15748//	filenames: A scalar or a vector containing the name(s) of the binary file(s) to be
15749// read.
15750//
15751//
15752func LMDBDataset(scope *Scope, filenames tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
15753	if scope.Err() != nil {
15754		return
15755	}
15756	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
15757	opspec := tf.OpSpec{
15758		Type: "LMDBDataset",
15759		Input: []tf.Input{
15760			filenames,
15761		},
15762		Attrs: attrs,
15763	}
15764	op := scope.AddOperation(opspec)
15765	return op.Output(0)
15766}
15767
15768// MatrixInverseAttr is an optional argument to MatrixInverse.
15769type MatrixInverseAttr func(optionalAttr)
15770
15771// MatrixInverseAdjoint sets the optional adjoint attribute to value.
15772// If not specified, defaults to false
15773func MatrixInverseAdjoint(value bool) MatrixInverseAttr {
15774	return func(m optionalAttr) {
15775		m["adjoint"] = value
15776	}
15777}
15778
15779// Computes the inverse of one or more square invertible matrices or their adjoints (conjugate transposes).
//
15782// The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
15783// form square matrices. The output is a tensor of the same shape as the input
15784// containing the inverse for all input submatrices `[..., :, :]`.
15785//
15786// The op uses LU decomposition with partial pivoting to compute the inverses.
15787//
15788// If a matrix is not invertible there is no guarantee what the op does. It
15789// may detect the condition and raise an exception or it may simply return a
15790// garbage result.
15791//
15792// Arguments:
15793//	input: Shape is `[..., M, M]`.
15794//
15795// Returns Shape is `[..., M, M]`.
15796//
15797// @compatibility(numpy)
15798// Equivalent to np.linalg.inv
15799// @end_compatibility
15800func MatrixInverse(scope *Scope, input tf.Output, optional ...MatrixInverseAttr) (output tf.Output) {
15801	if scope.Err() != nil {
15802		return
15803	}
15804	attrs := map[string]interface{}{}
15805	for _, a := range optional {
15806		a(attrs)
15807	}
15808	opspec := tf.OpSpec{
15809		Type: "MatrixInverse",
15810		Input: []tf.Input{
15811			input,
15812		},
15813		Attrs: attrs,
15814	}
15815	op := scope.AddOperation(opspec)
15816	return op.Output(0)
15817}
15818
15819// Computes the sign and the log of the absolute value of the determinant of
15820//
15821// one or more square matrices.
15822//
15823// The input is a tensor of shape `[N, M, M]` whose inner-most 2 dimensions
15824// form square matrices. The outputs are two tensors containing the signs and
15825// absolute values of the log determinants for all N input submatrices
15826// `[..., :, :]` such that `determinant = sign*exp(log_abs_determinant)`.
15827// The `log_abs_determinant` is computed as `det(P)*sum(log(diag(LU)))` where `LU`
15828// is the `LU` decomposition of the input and `P` is the corresponding
15829// permutation matrix.
15830//
15831// Arguments:
15832//	input: Shape is `[N, M, M]`.
15833//
15834// Returns:
15835//	sign: The signs of the log determinants of the inputs. Shape is `[N]`.
15836//	log_abs_determinant: The logs of the absolute values of the determinants
15837// of the N input matrices.  Shape is `[N]`.
15838func LogMatrixDeterminant(scope *Scope, input tf.Output) (sign tf.Output, log_abs_determinant tf.Output) {
15839	if scope.Err() != nil {
15840		return
15841	}
15842	opspec := tf.OpSpec{
15843		Type: "LogMatrixDeterminant",
15844		Input: []tf.Input{
15845			input,
15846		},
15847	}
15848	op := scope.AddOperation(opspec)
15849	return op.Output(0), op.Output(1)
15850}
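
// Illustrative sketch: recovering the determinant from the two outputs via
// determinant = sign * exp(log_abs_determinant).
//
// ```go
// s := op.NewScope()
// m := op.Const(s, [][][]float32{{{1, 2}, {3, 4}}}) // shape [1, 2, 2]
// sign, logAbsDet := op.LogMatrixDeterminant(s, m)
// det := op.Mul(s, sign, op.Exp(s, logAbsDet)) // [-2] for this input
// _ = det
// ```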
15851
15852// Computes the determinant of one or more square matrices.
15853//
15854// The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
15855// form square matrices. The output is a tensor containing the determinants
15856// for all input submatrices `[..., :, :]`.
15857//
15858// Arguments:
15859//	input: Shape is `[..., M, M]`.
15860//
15861// Returns Shape is `[...]`.
15862func MatrixDeterminant(scope *Scope, input tf.Output) (output tf.Output) {
15863	if scope.Err() != nil {
15864		return
15865	}
15866	opspec := tf.OpSpec{
15867		Type: "MatrixDeterminant",
15868		Input: []tf.Input{
15869			input,
15870		},
15871	}
15872	op := scope.AddOperation(opspec)
15873	return op.Output(0)
15874}
15875
15876// Creates a TensorList by indexing into a Tensor.
15877//
15878// Each member of the TensorList corresponds to one row of the input tensor,
15879// specified by the given index (see `tf.gather`).
15880//
15881// tensor: The input tensor.
15882// indices: The indices used to index into the list.
15883// element_shape: The shape of the elements in the list (can be less specified than
15884//   the shape of the tensor).
15885// num_elements: The size of the output list. Must be large enough to accommodate
15886//   the largest index in indices. If -1, the list is just large enough to include
15887//   the largest index in indices.
15888// output_handle: The TensorList.
15889func TensorListScatterV2(scope *Scope, tensor tf.Output, indices tf.Output, element_shape tf.Output, num_elements tf.Output) (output_handle tf.Output) {
15890	if scope.Err() != nil {
15891		return
15892	}
15893	opspec := tf.OpSpec{
15894		Type: "TensorListScatterV2",
15895		Input: []tf.Input{
15896			tensor, indices, element_shape, num_elements,
15897		},
15898	}
15899	op := scope.AddOperation(opspec)
15900	return op.Output(0)
15901}
15902
15903// Creates a TensorList by indexing into a Tensor.
15904//
15905// Each member of the TensorList corresponds to one row of the input tensor,
15906// specified by the given index (see `tf.gather`).
15907//
15908// tensor: The input tensor.
15909// indices: The indices used to index into the list.
15910// element_shape: The shape of the elements in the list (can be less specified than
15911//   the shape of the tensor).
15912// output_handle: The TensorList.
15913func TensorListScatter(scope *Scope, tensor tf.Output, indices tf.Output, element_shape tf.Output) (output_handle tf.Output) {
15914	if scope.Err() != nil {
15915		return
15916	}
15917	opspec := tf.OpSpec{
15918		Type: "TensorListScatter",
15919		Input: []tf.Input{
15920			tensor, indices, element_shape,
15921		},
15922	}
15923	op := scope.AddOperation(opspec)
15924	return op.Output(0)
15925}
15926
15927// Sets the index-th position of the list to contain the given tensor.
15928//
15929// input_handle: the list
15930// index: the position in the list to which the tensor will be assigned
15931// item: the element to be assigned to that position
15932// output_handle: the new list, with the element in the proper position
15933//
15934func TensorListSetItem(scope *Scope, input_handle tf.Output, index tf.Output, item tf.Output) (output_handle tf.Output) {
15935	if scope.Err() != nil {
15936		return
15937	}
15938	opspec := tf.OpSpec{
15939		Type: "TensorListSetItem",
15940		Input: []tf.Input{
15941			input_handle, index, item,
15942		},
15943	}
15944	op := scope.AddOperation(opspec)
15945	return op.Output(0)
15946}
15947
15948// Returns the item in the list with the given index.
15949//
15950// input_handle: the list
15951// index: the position in the list from which an element will be retrieved
15952// item: the element at that position
15953//
15954//
15955func TensorListGetItem(scope *Scope, input_handle tf.Output, index tf.Output, element_shape tf.Output, element_dtype tf.DataType) (item tf.Output) {
15956	if scope.Err() != nil {
15957		return
15958	}
15959	attrs := map[string]interface{}{"element_dtype": element_dtype}
15960	opspec := tf.OpSpec{
15961		Type: "TensorListGetItem",
15962		Input: []tf.Input{
15963			input_handle, index, element_shape,
15964		},
15965		Attrs: attrs,
15966	}
15967	op := scope.AddOperation(opspec)
15968	return op.Output(0)
15969}
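
// Illustrative sketch (hypothetical values): setting and then reading back one
// element of a list built with TensorListFromTensor (defined later in this
// file), with element shape [2].
//
// ```go
// s := op.NewScope()
// t := op.Const(s, [][]float32{{1, 2}, {3, 4}})
// elemShape := op.Const(s, []int32{2})
// list := op.TensorListFromTensor(s, t, elemShape)
// idx := op.Const(s, int32(1))
// list = op.TensorListSetItem(s, list, idx, op.Const(s, []float32{9, 9}))
// item := op.TensorListGetItem(s, list, idx, elemShape, tf.Float)
// _ = item // [9, 9]
// ```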
15970
15971// The shape of the elements of the given list, as a tensor.
15972//
15973//   input_handle: the list
15974//   element_shape: the shape of elements of the list
15975func TensorListElementShape(scope *Scope, input_handle tf.Output, shape_type tf.DataType) (element_shape tf.Output) {
15976	if scope.Err() != nil {
15977		return
15978	}
15979	attrs := map[string]interface{}{"shape_type": shape_type}
15980	opspec := tf.OpSpec{
15981		Type: "TensorListElementShape",
15982		Input: []tf.Input{
15983			input_handle,
15984		},
15985		Attrs: attrs,
15986	}
15987	op := scope.AddOperation(opspec)
15988	return op.Output(0)
15989}
15990
15991// ExperimentalThreadPoolHandleAttr is an optional argument to ExperimentalThreadPoolHandle.
15992type ExperimentalThreadPoolHandleAttr func(optionalAttr)
15993
15994// ExperimentalThreadPoolHandleMaxIntraOpParallelism sets the optional max_intra_op_parallelism attribute to value.
15995//
15996// value: The maximum degree of parallelism to use within operations that execute on this
15997// threadpool.
15998// If not specified, defaults to 1
15999func ExperimentalThreadPoolHandleMaxIntraOpParallelism(value int64) ExperimentalThreadPoolHandleAttr {
16000	return func(m optionalAttr) {
16001		m["max_intra_op_parallelism"] = value
16002	}
16003}
16004
16005// ExperimentalThreadPoolHandleContainer sets the optional container attribute to value.
16006// If not specified, defaults to ""
16007func ExperimentalThreadPoolHandleContainer(value string) ExperimentalThreadPoolHandleAttr {
16008	return func(m optionalAttr) {
16009		m["container"] = value
16010	}
16011}
16012
16013// ExperimentalThreadPoolHandleSharedName sets the optional shared_name attribute to value.
16014// If not specified, defaults to ""
16015func ExperimentalThreadPoolHandleSharedName(value string) ExperimentalThreadPoolHandleAttr {
16016	return func(m optionalAttr) {
16017		m["shared_name"] = value
16018	}
16019}
16020
16021// Creates a dataset that uses a custom thread pool to compute `input_dataset`.
16022//
16023// Arguments:
16024//	num_threads: The number of threads in the thread pool.
//	display_name: A human-readable name for the threads that may be visible in some
// visualizations.
16028//
16029// Returns A resource that can be consumed by one or more ExperimentalThreadPoolDataset
16030// ops.
16031func ExperimentalThreadPoolHandle(scope *Scope, num_threads int64, display_name string, optional ...ExperimentalThreadPoolHandleAttr) (handle tf.Output) {
16032	if scope.Err() != nil {
16033		return
16034	}
16035	attrs := map[string]interface{}{"num_threads": num_threads, "display_name": display_name}
16036	for _, a := range optional {
16037		a(attrs)
16038	}
16039	opspec := tf.OpSpec{
16040		Type: "ExperimentalThreadPoolHandle",
16041
16042		Attrs: attrs,
16043	}
16044	op := scope.AddOperation(opspec)
16045	return op.Output(0)
16046}
16047
// Creates a TensorList which, when stacked, has the value of `tensor`.
//
// Each tensor in the result list corresponds to one row of the input tensor.
//
// tensor: The input tensor.
// output_handle: The list.
func TensorListFromTensor(scope *Scope, tensor tf.Output, element_shape tf.Output) (output_handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "TensorListFromTensor",
		Input: []tf.Input{
			tensor, element_shape,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Splits a tensor into a list.
//
// list[i] corresponds to lengths[i] tensors from the input tensor.
// The tensor must have rank at least 1 and contain exactly sum(lengths) elements.
//
// tensor: The input tensor.
// element_shape: A shape compatible with that of elements in the tensor.
// lengths: Vector of sizes of the 0th dimension of tensors in the list.
// output_handle: The list.
func TensorListSplit(scope *Scope, tensor tf.Output, element_shape tf.Output, lengths tf.Output) (output_handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "TensorListSplit",
		Input: []tf.Input{
			tensor, element_shape, lengths,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// TensorListStackAttr is an optional argument to TensorListStack.
type TensorListStackAttr func(optionalAttr)

// TensorListStackNumElements sets the optional num_elements attribute to value.
// If not specified, defaults to -1
func TensorListStackNumElements(value int64) TensorListStackAttr {
	return func(m optionalAttr) {
		m["num_elements"] = value
	}
}

// Stacks all tensors in the list.
//
// Requires that all tensors have the same shape.
//
// input_handle: the input list
// tensor: the gathered result
// num_elements: optional. If not -1, the number of elements in the list.
//
func TensorListStack(scope *Scope, input_handle tf.Output, element_shape tf.Output, element_dtype tf.DataType, optional ...TensorListStackAttr) (tensor tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"element_dtype": element_dtype}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "TensorListStack",
		Input: []tf.Input{
			input_handle, element_shape,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

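// A sketch of a round trip through the list ops above: pack a tensor's rows
// into a TensorList, then stack them back into a tensor. The helper name and
// constant values are ours; Const is the constant-op wrapper from this
// package.
func exampleTensorListRoundTrip(s *Scope) tf.Output {
	t := Const(s, [][]float32{{1, 2}, {3, 4}, {5, 6}}) // three rows -> three list elements
	elemShape := Const(s, []int32{2})                  // shape of each list element
	list := TensorListFromTensor(s, t, elemShape)
	// All elements share a shape, so the list can be stacked; num_elements
	// is optional and defaults to -1 (unknown).
	return TensorListStack(s, list, elemShape, tf.Float,
		TensorListStackNumElements(3))
}
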
// Returns the number of tensors in the input tensor list.
//
// input_handle: the input list
// length: the number of tensors in the list
func TensorListLength(scope *Scope, input_handle tf.Output) (length tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "TensorListLength",
		Input: []tf.Input{
			input_handle,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Does nothing. Serves as a control trigger for scheduling.
//
// Only useful as a placeholder for control edges.
//
// Returns the created operation.
func ControlTrigger(scope *Scope) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "ControlTrigger",
	}
	return scope.AddOperation(opspec)
}

// Interleave the values from the `data` tensors into a single tensor.
//
// Builds a merged tensor such that
//
// ```python
//     merged[indices[m][i, ..., j], ...] = data[m][i, ..., j, ...]
// ```
//
// For example, if each `indices[m]` is scalar or vector, we have
//
// ```python
//     # Scalar indices:
//     merged[indices[m], ...] = data[m][...]
//
//     # Vector indices:
//     merged[indices[m][i], ...] = data[m][i, ...]
// ```
//
// Each `data[i].shape` must start with the corresponding `indices[i].shape`,
// and the rest of `data[i].shape` must be constant w.r.t. `i`.  That is, we
// must have `data[i].shape = indices[i].shape + constant`.  In terms of this
// `constant`, the output shape is
//
//     merged.shape = [max(indices)] + constant
//
// Values may be merged in parallel, so if an index appears in both `indices[m][i]`
// and `indices[n][j]`, the result may be invalid. This differs from the normal
// DynamicStitch operator that defines the behavior in that case.
//
// For example:
//
// ```python
//     indices[0] = 6
//     indices[1] = [4, 1]
//     indices[2] = [[5, 2], [0, 3]]
//     data[0] = [61, 62]
//     data[1] = [[41, 42], [11, 12]]
//     data[2] = [[[51, 52], [21, 22]], [[1, 2], [31, 32]]]
//     merged = [[1, 2], [11, 12], [21, 22], [31, 32], [41, 42],
//               [51, 52], [61, 62]]
// ```
//
// This method can be used to merge partitions created by `dynamic_partition`
// as illustrated in the following example:
//
// ```python
//     # Apply a function (increment x_i) to elements for which a certain
//     # condition applies (x_i != -1 in this example).
//     x=tf.constant([0.1, -1., 5.2, 4.3, -1., 7.4])
//     condition_mask=tf.not_equal(x,tf.constant(-1.))
//     partitioned_data = tf.dynamic_partition(
//         x, tf.cast(condition_mask, tf.int32) , 2)
//     partitioned_data[1] = partitioned_data[1] + 1.0
//     condition_indices = tf.dynamic_partition(
//         tf.range(tf.shape(x)[0]), tf.cast(condition_mask, tf.int32) , 2)
//     x = tf.dynamic_stitch(condition_indices, partitioned_data)
//     # Here x=[1.1, -1., 6.2, 5.3, -1, 8.4], the -1. values remain
//     # unchanged.
// ```
//
// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
// <img style="width:100%" src="https://www.tensorflow.org/images/DynamicStitch.png" alt>
// </div>
func ParallelDynamicStitch(scope *Scope, indices []tf.Output, data []tf.Output) (merged tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "ParallelDynamicStitch",
		Input: []tf.Input{
			tf.OutputList(indices), tf.OutputList(data),
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

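// A small Go sketch of the interleave above (helper name ours). With indices
// [0, 2] and [1], the merged vector is [10, 20, 30]; since the index sets are
// disjoint, the parallel merge is well defined here.
func exampleParallelDynamicStitch(s *Scope) tf.Output {
	indices := []tf.Output{
		Const(s, []int32{0, 2}),
		Const(s, []int32{1}),
	}
	data := []tf.Output{
		Const(s, []float32{10, 30}),
		Const(s, []float32{20}),
	}
	return ParallelDynamicStitch(s, indices, data)
}
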
// Returns a Tensor stack of all keys in a tensor map.
//
// input_handle: the input map
// keys: the returned Tensor of all keys in the map
func TensorMapStackKeys(scope *Scope, input_handle tf.Output, key_dtype tf.DataType) (keys tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"key_dtype": key_dtype}
	opspec := tf.OpSpec{
		Type: "TensorMapStackKeys",
		Input: []tf.Input{
			input_handle,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Returns whether the given key exists in the map.
//
// input_handle: the input map
// key: the key to check
// has_key: whether the key is already in the map or not
func TensorMapHasKey(scope *Scope, input_handle tf.Output, key tf.Output) (has_key tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "TensorMapHasKey",
		Input: []tf.Input{
			input_handle, key,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Inverse 3D fast Fourier transform.
//
// Computes the inverse 3-dimensional discrete Fourier transform over the
// inner-most 3 dimensions of `input`.
//
// Arguments:
//	input: A complex tensor.
//
// Returns A complex tensor of the same shape as `input`. The inner-most 3
//   dimensions of `input` are replaced with their inverse 3D Fourier transform.
//
// @compatibility(numpy)
// Equivalent to np.fft.ifftn with 3 dimensions.
// @end_compatibility
func IFFT3D(scope *Scope, input tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "IFFT3D",
		Input: []tf.Input{
			input,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Returns a map that is the 'input_handle' with the given key-value pair inserted.
//
// input_handle: the original map
// output_handle: the map with key and value inserted
// key: the key to be inserted
// value: the value to be inserted
func TensorMapInsert(scope *Scope, input_handle tf.Output, key tf.Output, value tf.Output) (output_handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "TensorMapInsert",
		Input: []tf.Input{
			input_handle, key, value,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Returns the value from a given key in a tensor map.
//
// input_handle: the input map
// key: the key to be looked up
// value: the value found from the given key
func TensorMapLookup(scope *Scope, input_handle tf.Output, key tf.Output, value_dtype tf.DataType) (value tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"value_dtype": value_dtype}
	opspec := tf.OpSpec{
		Type: "TensorMapLookup",
		Input: []tf.Input{
			input_handle, key,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

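// A sketch combining the map ops above (helper name and values ours). It
// assumes the generated EmptyTensorMap wrapper defined elsewhere in this file
// to create the initial handle; the key and value dtypes are fixed by the
// inserted tensors.
func exampleTensorMap(s *Scope) (hasKey, value tf.Output) {
	m := EmptyTensorMap(s) // assumed generated wrapper for the EmptyTensorMap op
	k := Const(s, int64(7))
	v := Const(s, []float32{1, 2, 3})
	m = TensorMapInsert(s, m, k, v)
	hasKey = TensorMapHasKey(s, m, k)
	value = TensorMapLookup(s, m, k, tf.Float)
	return hasKey, value
}
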
// DecodeImageAttr is an optional argument to DecodeImage.
type DecodeImageAttr func(optionalAttr)

// DecodeImageChannels sets the optional channels attribute to value.
//
// value: Number of color channels for the decoded image.
// If not specified, defaults to 0
func DecodeImageChannels(value int64) DecodeImageAttr {
	return func(m optionalAttr) {
		m["channels"] = value
	}
}

// DecodeImageDtype sets the optional dtype attribute to value.
//
// value: The desired DType of the returned Tensor.
// If not specified, defaults to DT_UINT8
func DecodeImageDtype(value tf.DataType) DecodeImageAttr {
	return func(m optionalAttr) {
		m["dtype"] = value
	}
}

// DecodeImageExpandAnimations sets the optional expand_animations attribute to value.
//
// value: Controls the output shape of the returned op. If True, the returned op will
// produce a 3-D tensor for PNG, JPEG, and BMP files; and a 4-D tensor for all
// GIFs, whether animated or not. If False, the returned op will produce a 3-D
// tensor for all file types and will truncate animated GIFs to the first frame.
// If not specified, defaults to true
func DecodeImageExpandAnimations(value bool) DecodeImageAttr {
	return func(m optionalAttr) {
		m["expand_animations"] = value
	}
}

// Function for decode_bmp, decode_gif, decode_jpeg, and decode_png.
//
// Detects whether an image is a BMP, GIF, JPEG, or PNG, and performs the
// appropriate operation to convert the input bytes string into a Tensor of type
// dtype.
//
// *NOTE*: decode_gif returns a 4-D array [num_frames, height, width, 3], as
// opposed to decode_bmp, decode_jpeg and decode_png, which return 3-D arrays
// [height, width, num_channels]. Make sure to take this into account when
// constructing your graph if you are intermixing GIF files with BMP, JPEG, and/or
// PNG files. Alternatively, set the expand_animations argument of this function to
// False, in which case the op will return 3-dimensional tensors and will truncate
// animated GIF files to the first frame.
//
// *NOTE*: If the first frame of an animated GIF does not occupy the entire
// canvas (maximum frame width x maximum frame height), then it fills the
// unoccupied areas (in the first frame) with zeros (black). For frames after the
// first that do not occupy the entire canvas, the op uses the previous frame to
// fill the unoccupied areas.
//
// Arguments:
//	contents: 0-D. The encoded image bytes.
//
// Returns 3-D with shape `[height, width, channels]` or 4-D with shape
// `[frame, height, width, channels]`.
func DecodeImage(scope *Scope, contents tf.Output, optional ...DecodeImageAttr) (image tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "DecodeImage",
		Input: []tf.Input{
			contents,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

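// A decoding sketch (helper name and file path ours): read raw bytes with the
// generated ReadFile wrapper from elsewhere in this file and decode them,
// forcing three color channels and collapsing animated GIFs to their first
// frame.
func exampleDecodeImage(s *Scope) tf.Output {
	contents := ReadFile(s, Const(s, "/path/to/image.png"))
	return DecodeImage(s, contents,
		DecodeImageChannels(3),
		DecodeImageExpandAnimations(false))
}
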
// AvgPoolAttr is an optional argument to AvgPool.
type AvgPoolAttr func(optionalAttr)

// AvgPoolDataFormat sets the optional data_format attribute to value.
//
// value: Specify the data format of the input and output data. With the
// default format "NHWC", the data is stored in the order of:
//     [batch, in_height, in_width, in_channels].
// Alternatively, the format could be "NCHW", the data storage order of:
//     [batch, in_channels, in_height, in_width].
// If not specified, defaults to "NHWC"
func AvgPoolDataFormat(value string) AvgPoolAttr {
	return func(m optionalAttr) {
		m["data_format"] = value
	}
}

// Performs average pooling on the input.
//
// Each entry in `output` is the mean of the corresponding size `ksize`
// window in `value`.
//
// Arguments:
//	value: 4-D with shape `[batch, height, width, channels]`.
//	ksize: The size of the sliding window for each dimension of `value`.
//	strides: The stride of the sliding window for each dimension of `value`.
//	padding: The type of padding algorithm to use.
//
// Returns The average pooled output tensor.
func AvgPool(scope *Scope, value tf.Output, ksize []int64, strides []int64, padding string, optional ...AvgPoolAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "AvgPool",
		Input: []tf.Input{
			value,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

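// A pooling sketch (helper name ours): 2x2 mean pooling with stride 2 over an
// NHWC-shaped batch of images, which halves the spatial dimensions.
func exampleAvgPool(s *Scope, images tf.Output) tf.Output {
	return AvgPool(s, images,
		[]int64{1, 2, 2, 1}, // ksize: pool over 2x2 windows
		[]int64{1, 2, 2, 1}, // strides: move the window by 2 in each spatial dim
		"VALID")
}
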
// AudioSummaryV2Attr is an optional argument to AudioSummaryV2.
type AudioSummaryV2Attr func(optionalAttr)

// AudioSummaryV2MaxOutputs sets the optional max_outputs attribute to value.
//
// value: Max number of batch elements to generate audio for.
// If not specified, defaults to 3
//
// REQUIRES: value >= 1
func AudioSummaryV2MaxOutputs(value int64) AudioSummaryV2Attr {
	return func(m optionalAttr) {
		m["max_outputs"] = value
	}
}

// Outputs a `Summary` protocol buffer with audio.
//
// The summary has up to `max_outputs` summary values containing audio. The
// audio is built from `tensor` which must be 3-D with shape `[batch_size,
// frames, channels]` or 2-D with shape `[batch_size, frames]`. The values are
// assumed to be in the range of `[-1.0, 1.0]` with a sample rate of `sample_rate`.
//
// The `tag` argument is a scalar `Tensor` of type `string`.  It is used to
// build the `tag` of the summary values:
//
// *  If `max_outputs` is 1, the summary value tag is '*tag*/audio'.
// *  If `max_outputs` is greater than 1, the summary value tags are
//    generated sequentially as '*tag*/audio/0', '*tag*/audio/1', etc.
//
// Arguments:
//	tag: Scalar. Used to build the `tag` attribute of the summary values.
//	tensor: 2-D of shape `[batch_size, frames]`.
//	sample_rate: The sample rate of the signal in hertz.
//
// Returns Scalar. Serialized `Summary` protocol buffer.
func AudioSummaryV2(scope *Scope, tag tf.Output, tensor tf.Output, sample_rate tf.Output, optional ...AudioSummaryV2Attr) (summary tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "AudioSummaryV2",
		Input: []tf.Input{
			tag, tensor, sample_rate,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// StringLengthAttr is an optional argument to StringLength.
type StringLengthAttr func(optionalAttr)

// StringLengthUnit sets the optional unit attribute to value.
//
// value: The unit that is counted to compute string length.  One of: `"BYTE"` (for
// the number of bytes in each string) or `"UTF8_CHAR"` (for the number of UTF-8
// encoded Unicode code points in each string).  Results are undefined
// if `unit=UTF8_CHAR` and the `input` strings do not contain structurally
// valid UTF-8.
// If not specified, defaults to "BYTE"
func StringLengthUnit(value string) StringLengthAttr {
	return func(m optionalAttr) {
		m["unit"] = value
	}
}

// String lengths of `input`.
//
// Computes the length of each string given in the input tensor.
//
// >>> strings = tf.constant(['Hello','TensorFlow', '\U0001F642'])
// >>> tf.strings.length(strings).numpy() # default counts bytes
// array([ 5, 10, 4], dtype=int32)
// >>> tf.strings.length(strings, unit="UTF8_CHAR").numpy()
// array([ 5, 10, 1], dtype=int32)
//
//
// Arguments:
//	input: The strings for which to compute the length of each element.
//
// Returns Integer tensor that has the same shape as `input`. The output contains the
// element-wise string lengths of `input`.
func StringLength(scope *Scope, input tf.Output, optional ...StringLengthAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "StringLength",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

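// A sketch reproducing the doc example above in Go (helper name ours): the
// emoji is one Unicode code point encoded as four UTF-8 bytes, so the two
// units report different lengths for it.
func exampleStringLength(s *Scope) (bytes, chars tf.Output) {
	strs := Const(s, []string{"Hello", "TensorFlow", "\U0001F642"})
	bytes = StringLength(s, strs) // unit defaults to "BYTE"
	chars = StringLength(s, strs, StringLengthUnit("UTF8_CHAR"))
	return bytes, chars
}
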
// TensorSummaryAttr is an optional argument to TensorSummary.
type TensorSummaryAttr func(optionalAttr)

// TensorSummaryDescription sets the optional description attribute to value.
//
// value: A json-encoded SummaryDescription proto.
// If not specified, defaults to ""
func TensorSummaryDescription(value string) TensorSummaryAttr {
	return func(m optionalAttr) {
		m["description"] = value
	}
}

// TensorSummaryLabels sets the optional labels attribute to value.
//
// value: An unused list of strings.
// If not specified, defaults to <>
func TensorSummaryLabels(value []string) TensorSummaryAttr {
	return func(m optionalAttr) {
		m["labels"] = value
	}
}

// TensorSummaryDisplayName sets the optional display_name attribute to value.
//
// value: An unused string.
// If not specified, defaults to ""
func TensorSummaryDisplayName(value string) TensorSummaryAttr {
	return func(m optionalAttr) {
		m["display_name"] = value
	}
}

// Outputs a `Summary` protocol buffer with a tensor.
//
// This op is being phased out in favor of TensorSummaryV2, which lets callers pass
// a tag as well as a serialized SummaryMetadata proto string that contains
// plugin-specific data. We will keep this op to maintain backwards compatibility.
//
// Arguments:
//	tensor: A tensor to serialize.
func TensorSummary(scope *Scope, tensor tf.Output, optional ...TensorSummaryAttr) (summary tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "TensorSummary",
		Input: []tf.Input{
			tensor,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Outputs a `Summary` protocol buffer with a histogram.
//
// The generated
// [`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)
// has one summary value containing a histogram for `values`.
//
// This op reports an `InvalidArgument` error if any value is not finite.
//
// Arguments:
//	tag: Scalar.  Tag to use for the `Summary.Value`.
//	values: Any shape. Values to use to build the histogram.
//
// Returns Scalar. Serialized `Summary` protocol buffer.
func HistogramSummary(scope *Scope, tag tf.Output, values tf.Output) (summary tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "HistogramSummary",
		Input: []tf.Input{
			tag, values,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Merges summaries.
//
// This op creates a
// [`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)
// protocol buffer that contains the union of all the values in the input
// summaries.
//
// When the Op is run, it reports an `InvalidArgument` error if multiple values
// in the summaries to merge use the same tag.
//
// Arguments:
//	inputs: Can be of any shape.  Each must contain serialized `Summary` protocol
// buffers.
//
// Returns Scalar. Serialized `Summary` protocol buffer.
func MergeSummary(scope *Scope, inputs []tf.Output) (summary tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "MergeSummary",
		Input: []tf.Input{
			tf.OutputList(inputs),
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

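// A summary-merging sketch (helper name and tags ours): build two histogram
// summaries and merge them into a single serialized Summary proto. Abs is the
// generated wrapper for the Abs op defined elsewhere in this file.
func exampleMergedSummary(s *Scope, values tf.Output) tf.Output {
	h1 := HistogramSummary(s, Const(s, "values"), values)
	h2 := HistogramSummary(s, Const(s, "values_abs"), Abs(s, values))
	return MergeSummary(s, []tf.Output{h1, h2})
}
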
// Computes the gradient for the sqrt of `x` wrt its input.
//
// Specifically, `grad = dy * 0.5 / y`, where `y = sqrt(x)`, and `dy`
// is the corresponding input gradient.
func SqrtGrad(scope *Scope, y tf.Output, dy tf.Output) (z tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "SqrtGrad",
		Input: []tf.Input{
			y, dy,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// MutableHashTableOfTensorsV2Attr is an optional argument to MutableHashTableOfTensorsV2.
type MutableHashTableOfTensorsV2Attr func(optionalAttr)

// MutableHashTableOfTensorsV2Container sets the optional container attribute to value.
//
// value: If non-empty, this table is placed in the given container.
// Otherwise, a default container is used.
// If not specified, defaults to ""
func MutableHashTableOfTensorsV2Container(value string) MutableHashTableOfTensorsV2Attr {
	return func(m optionalAttr) {
		m["container"] = value
	}
}

// MutableHashTableOfTensorsV2SharedName sets the optional shared_name attribute to value.
//
// value: If non-empty, this table is shared under the given name across
// multiple sessions.
// If not specified, defaults to ""
func MutableHashTableOfTensorsV2SharedName(value string) MutableHashTableOfTensorsV2Attr {
	return func(m optionalAttr) {
		m["shared_name"] = value
	}
}

// MutableHashTableOfTensorsV2UseNodeNameSharing sets the optional use_node_name_sharing attribute to value.
// If not specified, defaults to false
func MutableHashTableOfTensorsV2UseNodeNameSharing(value bool) MutableHashTableOfTensorsV2Attr {
	return func(m optionalAttr) {
		m["use_node_name_sharing"] = value
	}
}

// MutableHashTableOfTensorsV2ValueShape sets the optional value_shape attribute to value.
// If not specified, defaults to <>
func MutableHashTableOfTensorsV2ValueShape(value tf.Shape) MutableHashTableOfTensorsV2Attr {
	return func(m optionalAttr) {
		m["value_shape"] = value
	}
}

// Creates an empty hash table.
//
// This op creates a mutable hash table, specifying the type of its keys and
// values. Each value must be a vector. Data can be inserted into the table using
// the insert operations. It does not support the initialization operation.
//
// Arguments:
//	key_dtype: Type of the table keys.
//	value_dtype: Type of the table values.
//
// Returns Handle to a table.
func MutableHashTableOfTensorsV2(scope *Scope, key_dtype tf.DataType, value_dtype tf.DataType, optional ...MutableHashTableOfTensorsV2Attr) (table_handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"key_dtype": key_dtype, "value_dtype": value_dtype}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "MutableHashTableOfTensorsV2",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Computes the grayscale dilation of 4-D `input` and 3-D `filter` tensors.
//
// The `input` tensor has shape `[batch, in_height, in_width, depth]` and the
// `filter` tensor has shape `[filter_height, filter_width, depth]`, i.e., each
// input channel is processed independently of the others with its own structuring
// function. The `output` tensor has shape
// `[batch, out_height, out_width, depth]`. The spatial dimensions of the output
// tensor depend on the `padding` algorithm. We currently only support the default
// "NHWC" `data_format`.
//
// In detail, the grayscale morphological 2-D dilation is the max-sum correlation
// (for consistency with `conv2d`, we use unmirrored filters):
//
//     output[b, y, x, c] =
//        max_{dy, dx} input[b,
//                           strides[1] * y + rates[1] * dy,
//                           strides[2] * x + rates[2] * dx,
//                           c] +
//                     filter[dy, dx, c]
//
// Max-pooling is a special case when the filter has size equal to the pooling
// kernel size and contains all zeros.
//
// Note on duality: The dilation of `input` by the `filter` is equal to the
// negation of the erosion of `-input` by the reflected `filter`.
//
// Arguments:
//	input: 4-D with shape `[batch, in_height, in_width, depth]`.
//	filter: 3-D with shape `[filter_height, filter_width, depth]`.
//	strides: The stride of the sliding window for each dimension of the input
// tensor. Must be: `[1, stride_height, stride_width, 1]`.
//	rates: The input stride for atrous morphological dilation. Must be:
// `[1, rate_height, rate_width, 1]`.
//	padding: The type of padding algorithm to use.
//
// Returns 4-D with shape `[batch, out_height, out_width, depth]`.
func Dilation2D(scope *Scope, input tf.Output, filter tf.Output, strides []int64, rates []int64, padding string) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"strides": strides, "rates": rates, "padding": padding}
	opspec := tf.OpSpec{
		Type: "Dilation2D",
		Input: []tf.Input{
			input, filter,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// IsotonicRegressionAttr is an optional argument to IsotonicRegression.
type IsotonicRegressionAttr func(optionalAttr)

// IsotonicRegressionOutputDtype sets the optional output_dtype attribute to value.
//
// value: Dtype of output.
// If not specified, defaults to DT_FLOAT
func IsotonicRegressionOutputDtype(value tf.DataType) IsotonicRegressionAttr {
	return func(m optionalAttr) {
		m["output_dtype"] = value
	}
}

// Solves a batch of isotonic regression problems.
//
// Arguments:
//	input: A (batch_size, dim)-tensor holding a batch of inputs.
//
// Returns:
//	output: A (batch_size, dim)-tensor holding the per-batch element solutions.
//	segments: An int32 (batch_size, dim)-tensor with the segments.
func IsotonicRegression(scope *Scope, input tf.Output, optional ...IsotonicRegressionAttr) (output tf.Output, segments tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "IsotonicRegression",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1)
}

// Computes softplus: `log(exp(features) + 1)`.
func Softplus(scope *Scope, features tf.Output) (activations tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Softplus",
		Input: []tf.Input{
			features,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// MutableHashTableV2Attr is an optional argument to MutableHashTableV2.
type MutableHashTableV2Attr func(optionalAttr)

// MutableHashTableV2Container sets the optional container attribute to value.
//
// value: If non-empty, this table is placed in the given container.
// Otherwise, a default container is used.
// If not specified, defaults to ""
func MutableHashTableV2Container(value string) MutableHashTableV2Attr {
	return func(m optionalAttr) {
		m["container"] = value
	}
}

// MutableHashTableV2SharedName sets the optional shared_name attribute to value.
//
// value: If non-empty, this table is shared under the given name across
// multiple sessions.
// If not specified, defaults to ""
func MutableHashTableV2SharedName(value string) MutableHashTableV2Attr {
	return func(m optionalAttr) {
		m["shared_name"] = value
	}
}

// MutableHashTableV2UseNodeNameSharing sets the optional use_node_name_sharing attribute to value.
//
// value: If true and shared_name is empty, the table is shared
// using the node name.
// If not specified, defaults to false
func MutableHashTableV2UseNodeNameSharing(value bool) MutableHashTableV2Attr {
	return func(m optionalAttr) {
		m["use_node_name_sharing"] = value
	}
}

// Creates an empty hash table.
//
// This op creates a mutable hash table, specifying the type of its keys and
// values. Each value must be a scalar. Data can be inserted into the table using
// the insert operations. It does not support the initialization operation.
//
// Arguments:
//	key_dtype: Type of the table keys.
//	value_dtype: Type of the table values.
//
// Returns Handle to a table.
func MutableHashTableV2(scope *Scope, key_dtype tf.DataType, value_dtype tf.DataType, optional ...MutableHashTableV2Attr) (table_handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"key_dtype": key_dtype, "value_dtype": value_dtype}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "MutableHashTableV2",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

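// A table sketch (helper name, keys, and values ours): create a mutable
// string->int64 table, insert a small vocabulary, and look keys up with a
// default for misses. LookupTableInsertV2 and LookupTableFindV2 are generated
// wrappers defined elsewhere in this file; the insert operation must be
// passed as a target when the graph is run.
func exampleMutableHashTable(s *Scope) (insert *tf.Operation, values tf.Output) {
	table := MutableHashTableV2(s, tf.String, tf.Int64,
		MutableHashTableV2SharedName("vocab"))
	insert = LookupTableInsertV2(s, table,
		Const(s, []string{"a", "b"}), Const(s, []int64{0, 1}))
	values = LookupTableFindV2(s, table,
		Const(s, []string{"b", "missing"}), Const(s, int64(-1)))
	return insert, values
}
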
// Calculates the prior from the training data (the bias) and fills in the first node with the logits' prior. Returns a boolean indicating whether to continue centering.
//
// Arguments:
//	tree_ensemble_handle: Handle to the tree ensemble.
//	mean_gradients: A tensor with shape=[logits_dimension] with mean of gradients for a first node.
//	mean_hessians: A tensor with shape=[logits_dimension] with mean of hessians for a first node.
//	l1: l1 regularization factor on leaf weights, per instance based.
//	l2: l2 regularization factor on leaf weights, per instance based.
//
// Returns Bool, whether to continue bias centering.
func BoostedTreesCenterBias(scope *Scope, tree_ensemble_handle tf.Output, mean_gradients tf.Output, mean_hessians tf.Output, l1 tf.Output, l2 tf.Output) (continue_centering tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "BoostedTreesCenterBias",
		Input: []tf.Input{
			tree_ensemble_handle, mean_gradients, mean_hessians, l1, l2,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// HashTableV2Attr is an optional argument to HashTableV2.
type HashTableV2Attr func(optionalAttr)

// HashTableV2Container sets the optional container attribute to value.
//
// value: If non-empty, this table is placed in the given container.
// Otherwise, a default container is used.
// If not specified, defaults to ""
func HashTableV2Container(value string) HashTableV2Attr {
	return func(m optionalAttr) {
		m["container"] = value
	}
}

// HashTableV2SharedName sets the optional shared_name attribute to value.
//
// value: If non-empty, this table is shared under the given name across
// multiple sessions.
// If not specified, defaults to ""
func HashTableV2SharedName(value string) HashTableV2Attr {
	return func(m optionalAttr) {
		m["shared_name"] = value
	}
}

// HashTableV2UseNodeNameSharing sets the optional use_node_name_sharing attribute to value.
//
// value: If true and shared_name is empty, the table is shared
// using the node name.
// If not specified, defaults to false
func HashTableV2UseNodeNameSharing(value bool) HashTableV2Attr {
	return func(m optionalAttr) {
		m["use_node_name_sharing"] = value
	}
}

// Creates a non-initialized hash table.
//
// This op creates a hash table, specifying the type of its keys and values.
// Before using the table you will have to initialize it.  After initialization the
// table will be immutable.
//
// Arguments:
//	key_dtype: Type of the table keys.
//	value_dtype: Type of the table values.
//
// Returns Handle to a table.
func HashTableV2(scope *Scope, key_dtype tf.DataType, value_dtype tf.DataType, optional ...HashTableV2Attr) (table_handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"key_dtype": key_dtype, "value_dtype": value_dtype}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "HashTableV2",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// MatrixDiagV3Attr is an optional argument to MatrixDiagV3.
type MatrixDiagV3Attr func(optionalAttr)

// MatrixDiagV3Align sets the optional align attribute to value.
//
// value: Some diagonals are shorter than `max_diag_len` and need to be padded. `align` is
// a string specifying how superdiagonals and subdiagonals should be aligned,
// respectively. There are four possible alignments: "RIGHT_LEFT" (default),
// "LEFT_RIGHT", "LEFT_LEFT", and "RIGHT_RIGHT". "RIGHT_LEFT" aligns superdiagonals
// to the right (left-pads the row) and subdiagonals to the left (right-pads the
// row). It is the packing format LAPACK uses. cuSPARSE uses "LEFT_RIGHT", which is
// the opposite alignment.
// If not specified, defaults to "RIGHT_LEFT"
func MatrixDiagV3Align(value string) MatrixDiagV3Attr {
	return func(m optionalAttr) {
		m["align"] = value
	}
}

// Returns a batched diagonal tensor with given batched diagonal values.
//
// Returns a tensor with the contents in `diagonal` as `k[0]`-th to `k[1]`-th
// diagonals of a matrix, with everything else padded with `padding`. `num_rows`
// and `num_cols` specify the dimension of the innermost matrix of the output. If
// both are not specified, the op assumes the innermost matrix is square and infers
// its size from `k` and the innermost dimension of `diagonal`. If only one of them
// is specified, the op assumes the unspecified value is the smallest possible
// based on other criteria.
//
// Let `diagonal` have `r` dimensions `[I, J, ..., L, M, N]`. The output tensor has
// rank `r+1` with shape `[I, J, ..., L, M, num_rows, num_cols]` when only one
// diagonal is given (`k` is an integer or `k[0] == k[1]`). Otherwise, it has rank
// `r` with shape `[I, J, ..., L, num_rows, num_cols]`.
//
// The second innermost dimension of `diagonal` has double meaning.
// When `k` is scalar or `k[0] == k[1]`, `M` is part of the batch size
// [I, J, ..., M], and the output tensor is:
//
// ```
// output[i, j, ..., l, m, n]
//   = diagonal[i, j, ..., l, n-max(d_upper, 0)] ; if n - m == d_upper
//     padding_value                             ; otherwise
// ```
//
// Otherwise, `M` is treated as the number of diagonals for the matrix in the
// same batch (`M = k[1]-k[0]+1`), and the output tensor is:
//
// ```
// output[i, j, ..., l, m, n]
//   = diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= k[1]
//     padding_value                                     ; otherwise
// ```
// where `d = n - m`, `diag_index = k[1] - d`, and
// `index_in_diag = n - max(d, 0) + offset`.
//
// `offset` is zero except when the alignment of the diagonal is to the right.
// ```
// offset = max_diag_len - diag_len(d) ; if (`align` in {RIGHT_LEFT, RIGHT_RIGHT}
//                                            and `d >= 0`) or
//                                          (`align` in {LEFT_RIGHT, RIGHT_RIGHT}
//                                            and `d <= 0`)
//          0                          ; otherwise
// ```
// where `diag_len(d) = min(cols - max(d, 0), rows + min(d, 0))`.
//
// For example:
//
// ```
// # The main diagonal.
// diagonal = np.array([[1, 2, 3, 4],            # Input shape: (2, 4)
//                      [5, 6, 7, 8]])
// tf.matrix_diag(diagonal) ==> [[[1, 0, 0, 0],  # Output shape: (2, 4, 4)
//                                [0, 2, 0, 0],
//                                [0, 0, 3, 0],
//                                [0, 0, 0, 4]],
//                               [[5, 0, 0, 0],
//                                [0, 6, 0, 0],
//                                [0, 0, 7, 0],
//                                [0, 0, 0, 8]]]
//
// # A superdiagonal (per batch).
// diagonal = np.array([[1, 2, 3],  # Input shape: (2, 3)
//                      [4, 5, 6]])
// tf.matrix_diag(diagonal, k = 1)
//   ==> [[[0, 1, 0, 0],  # Output shape: (2, 4, 4)
//         [0, 0, 2, 0],
//         [0, 0, 0, 3],
//         [0, 0, 0, 0]],
//        [[0, 4, 0, 0],
//         [0, 0, 5, 0],
//         [0, 0, 0, 6],
//         [0, 0, 0, 0]]]
//
// # A tridiagonal band (per batch).
// diagonals = np.array([[[0, 8, 9],  # Input shape: (2, 2, 3)
//                        [1, 2, 3],
//                        [4, 5, 0]],
//                       [[0, 2, 3],
//                        [6, 7, 9],
//                        [9, 1, 0]]])
// tf.matrix_diag(diagonals, k = (-1, 1))
//   ==> [[[1, 8, 0],  # Output shape: (2, 3, 3)
//         [4, 2, 9],
//         [0, 5, 3]],
//        [[6, 2, 0],
//         [9, 7, 3],
//         [0, 1, 9]]]
//
// # LEFT_RIGHT alignment.
// diagonals = np.array([[[8, 9, 0],  # Input shape: (2, 2, 3)
//                        [1, 2, 3],
//                        [0, 4, 5]],
//                       [[2, 3, 0],
//                        [6, 7, 9],
//                        [0, 9, 1]]])
// tf.matrix_diag(diagonals, k = (-1, 1), align="LEFT_RIGHT")
//   ==> [[[1, 8, 0],  # Output shape: (2, 3, 3)
//         [4, 2, 9],
//         [0, 5, 3]],
//        [[6, 2, 0],
//         [9, 7, 3],
//         [0, 1, 9]]]
//
// # Rectangular matrix.
// diagonal = np.array([1, 2])  # Input shape: (2)
// tf.matrix_diag(diagonal, k = -1, num_rows = 3, num_cols = 4)
//   ==> [[0, 0, 0, 0],  # Output shape: (3, 4)
//        [1, 0, 0, 0],
//        [0, 2, 0, 0]]
//
// # Rectangular matrix with inferred num_cols and padding_value = 9.
// tf.matrix_diag(diagonal, k = -1, num_rows = 3, padding_value = 9)
//   ==> [[9, 9],  # Output shape: (3, 2)
//        [1, 9],
//        [9, 2]]
//
// ```
//
// Arguments:
//	diagonal: Rank `r`, where `r >= 1`
//	k: Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main
// diagonal, and negative value means subdiagonals. `k` can be a single integer
// (for a single diagonal) or a pair of integers specifying the low and high ends
// of a matrix band. `k[0]` must not be larger than `k[1]`.
//	num_rows: The number of rows of the output matrix. If it is not provided, the op assumes
// the output matrix is a square matrix and infers the matrix size from k and the
// innermost dimension of `diagonal`.
//	num_cols: The number of columns of the output matrix. If it is not provided, the op
// assumes the output matrix is a square matrix and infers the matrix size from
// k and the innermost dimension of `diagonal`.
//	padding_value: The number to fill the area outside the specified diagonal band with.
// Default is 0.
//
// Returns Has rank `r+1` when `k` is an integer or `k[0] == k[1]`, rank `r` otherwise.
func MatrixDiagV3(scope *Scope, diagonal tf.Output, k tf.Output, num_rows tf.Output, num_cols tf.Output, padding_value tf.Output, optional ...MatrixDiagV3Attr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "MatrixDiagV3",
		Input: []tf.Input{
			diagonal, k, num_rows, num_cols, padding_value,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

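// A sketch of the superdiagonal case above (helper name ours): num_rows and
// num_cols are passed as -1 so the op infers a square 4x4 output from k and
// the diagonal length, matching the doc example.
func exampleMatrixDiag(s *Scope) tf.Output {
	diagonal := Const(s, [][]int32{{1, 2, 3}, {4, 5, 6}}) // input shape (2, 3)
	k := Const(s, int32(1))                               // one superdiagonal
	infer := Const(s, int32(-1))                          // infer num_rows/num_cols
	padding := Const(s, int32(0))
	return MatrixDiagV3(s, diagonal, k, infer, infer, padding,
		MatrixDiagV3Align("RIGHT_LEFT"))
}
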
// Greedily selects a subset of bounding boxes in descending order of score,
//
// pruning away boxes that have high overlaps
// with previously selected boxes.  Bounding boxes with score less than
// `score_threshold` are removed. N-by-n overlap values are supplied as a square matrix,
// which allows for defining a custom overlap criterion (e.g. intersection over union,
// intersection over area, etc.).
//
// The output of this operation is a set of integers indexing into the input
// collection of bounding boxes representing the selected boxes.  The bounding
// box coordinates corresponding to the selected indices can then be obtained
// using the `tf.gather` operation.  For example:
//
//   selected_indices = tf.image.non_max_suppression_with_overlaps(
//       overlaps, scores, max_output_size, overlap_threshold, score_threshold)
//   selected_boxes = tf.gather(boxes, selected_indices)
//
// Arguments:
//	overlaps: A 2-D float tensor of shape `[num_boxes, num_boxes]` representing
// the n-by-n box overlap values.
//	scores: A 1-D float tensor of shape `[num_boxes]` representing a single
// score corresponding to each box (each row of boxes).
//	max_output_size: A scalar integer tensor representing the maximum number of
// boxes to be selected by non max suppression.
//	overlap_threshold: A 0-D float tensor representing the threshold for deciding whether
// boxes overlap too much.
//	score_threshold: A 0-D float tensor representing the threshold for deciding when to remove
// boxes based on score.
//
// Returns A 1-D integer tensor of shape `[M]` representing the selected
// indices from the boxes tensor, where `M <= max_output_size`.
func NonMaxSuppressionWithOverlaps(scope *Scope, overlaps tf.Output, scores tf.Output, max_output_size tf.Output, overlap_threshold tf.Output, score_threshold tf.Output) (selected_indices tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "NonMaxSuppressionWithOverlaps",
		Input: []tf.Input{
			overlaps, scores, max_output_size, overlap_threshold, score_threshold,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

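// A selection sketch mirroring the pseudo-code in the doc comment above
// (helper name and threshold values ours): Gather is the generated wrapper
// for the Gather op defined elsewhere in this file.
func exampleNonMaxSuppression(s *Scope, boxes, overlaps, scores tf.Output) tf.Output {
	selected := NonMaxSuppressionWithOverlaps(s, overlaps, scores,
		Const(s, int32(10)),    // max_output_size
		Const(s, float32(0.5)), // overlap_threshold
		Const(s, float32(0.1))) // score_threshold
	return Gather(s, boxes, selected)
}
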
// Outputs all keys and values in the table.
//
// Arguments:
//	table_handle: Handle to the table.
//
//
//
// Returns:
//	keys: Vector of all keys present in the table.
//	values: Tensor of all values in the table. Indexed in parallel with `keys`.
func LookupTableExportV2(scope *Scope, table_handle tf.Output, Tkeys tf.DataType, Tvalues tf.DataType) (keys tf.Output, values tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"Tkeys": Tkeys, "Tvalues": Tvalues}
	opspec := tf.OpSpec{
		Type: "LookupTableExportV2",
		Input: []tf.Input{
			table_handle,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1)
}

// ParseSingleSequenceExampleAttr is an optional argument to ParseSingleSequenceExample.
type ParseSingleSequenceExampleAttr func(optionalAttr)

// ParseSingleSequenceExampleContextSparseTypes sets the optional context_sparse_types attribute to value.
//
// value: A list of Ncontext_sparse types; the data types of data in
// each context Feature given in context_sparse_keys.
// Currently the ParseSingleSequenceExample supports DT_FLOAT (FloatList),
// DT_INT64 (Int64List), and DT_STRING (BytesList).
// If not specified, defaults to <>
//
// REQUIRES: len(value) >= 0
func ParseSingleSequenceExampleContextSparseTypes(value []tf.DataType) ParseSingleSequenceExampleAttr {
	return func(m optionalAttr) {
		m["context_sparse_types"] = value
	}
}

// ParseSingleSequenceExampleFeatureListDenseTypes sets the optional feature_list_dense_types attribute to value.
// If not specified, defaults to <>
//
// REQUIRES: len(value) >= 0
func ParseSingleSequenceExampleFeatureListDenseTypes(value []tf.DataType) ParseSingleSequenceExampleAttr {
	return func(m optionalAttr) {
		m["feature_list_dense_types"] = value
	}
}

// ParseSingleSequenceExampleContextDenseShapes sets the optional context_dense_shapes attribute to value.
//
// value: A list of Ncontext_dense shapes; the shapes of data in
// each context Feature given in context_dense_keys.
// The number of elements in the Feature corresponding to context_dense_key[j]
// must always equal context_dense_shapes[j].NumEntries().
// The shape of context_dense_values[j] will match context_dense_shapes[j].
// If not specified, defaults to <>
//
// REQUIRES: len(value) >= 0
func ParseSingleSequenceExampleContextDenseShapes(value []tf.Shape) ParseSingleSequenceExampleAttr {
	return func(m optionalAttr) {
		m["context_dense_shapes"] = value
	}
}

// ParseSingleSequenceExampleFeatureListSparseTypes sets the optional feature_list_sparse_types attribute to value.
//
// value: A list of Nfeature_list_sparse types; the data types
// of data in each FeatureList given in feature_list_sparse_keys.
// Currently the ParseSingleSequenceExample supports DT_FLOAT (FloatList),
// DT_INT64 (Int64List), and DT_STRING (BytesList).
// If not specified, defaults to <>
//
// REQUIRES: len(value) >= 0
func ParseSingleSequenceExampleFeatureListSparseTypes(value []tf.DataType) ParseSingleSequenceExampleAttr {
	return func(m optionalAttr) {
		m["feature_list_sparse_types"] = value
	}
}

// ParseSingleSequenceExampleFeatureListDenseShapes sets the optional feature_list_dense_shapes attribute to value.
//
// value: A list of Nfeature_list_dense shapes; the shapes of
// data in each FeatureList given in feature_list_dense_keys.
// The shape of each Feature in the FeatureList corresponding to
// feature_list_dense_key[j] must always equal
// feature_list_dense_shapes[j].NumEntries().
// If not specified, defaults to <>
//
// REQUIRES: len(value) >= 0
func ParseSingleSequenceExampleFeatureListDenseShapes(value []tf.Shape) ParseSingleSequenceExampleAttr {
	return func(m optionalAttr) {
		m["feature_list_dense_shapes"] = value
	}
}

// Transforms a scalar brain.SequenceExample proto (as strings) into typed tensors.
//
// Arguments:
//	serialized: A scalar containing a binary serialized SequenceExample proto.
//	feature_list_dense_missing_assumed_empty: A vector listing the
// FeatureList keys which may be missing from the SequenceExample.  If the
// associated FeatureList is missing, it is treated as empty.  By default,
// any FeatureList not listed in this vector must exist in the SequenceExample.
//	context_sparse_keys: A list of Ncontext_sparse string Tensors (scalars).
// The keys expected in the Examples' features associated with context_sparse
// values.
//	context_dense_keys: A list of Ncontext_dense string Tensors (scalars).
// The keys expected in the SequenceExamples' context features associated with
// dense values.
//	feature_list_sparse_keys: A list of Nfeature_list_sparse string Tensors
// (scalars).  The keys expected in the FeatureLists associated with sparse
// values.
//	feature_list_dense_keys: A list of Nfeature_list_dense string Tensors (scalars).
// The keys expected in the SequenceExamples' feature_lists associated
// with lists of dense values.
//	context_dense_defaults: A list of Ncontext_dense Tensors (some may be empty).
// context_dense_defaults[j] provides default values
// when the SequenceExample's context map lacks context_dense_key[j].
// If an empty Tensor is provided for context_dense_defaults[j],
// then the Feature context_dense_keys[j] is required.
// The input type is inferred from context_dense_defaults[j], even when it's
// empty.  If context_dense_defaults[j] is not empty, its shape must match
// context_dense_shapes[j].
//	debug_name: A scalar containing the name of the serialized proto.
// May contain, for example, table key (descriptive) name for the
// corresponding serialized proto.  This is purely useful for debugging
// purposes, and the presence of values here has no effect on the output.
// May also be an empty scalar if no name is available.
func ParseSingleSequenceExample(scope *Scope, serialized tf.Output, feature_list_dense_missing_assumed_empty tf.Output, context_sparse_keys []tf.Output, context_dense_keys []tf.Output, feature_list_sparse_keys []tf.Output, feature_list_dense_keys []tf.Output, context_dense_defaults []tf.Output, debug_name tf.Output, optional ...ParseSingleSequenceExampleAttr) (context_sparse_indices []tf.Output, context_sparse_values []tf.Output, context_sparse_shapes []tf.Output, context_dense_values []tf.Output, feature_list_sparse_indices []tf.Output, feature_list_sparse_values []tf.Output, feature_list_sparse_shapes []tf.Output, feature_list_dense_values []tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ParseSingleSequenceExample",
		Input: []tf.Input{
			serialized, feature_list_dense_missing_assumed_empty, tf.OutputList(context_sparse_keys), tf.OutputList(context_dense_keys), tf.OutputList(feature_list_sparse_keys), tf.OutputList(feature_list_dense_keys), tf.OutputList(context_dense_defaults), debug_name,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	if scope.Err() != nil {
		return
	}
	var idx int
	var err error
	if context_sparse_indices, idx, err = makeOutputList(op, idx, "context_sparse_indices"); err != nil {
		scope.UpdateErr("ParseSingleSequenceExample", err)
		return
	}
	if context_sparse_values, idx, err = makeOutputList(op, idx, "context_sparse_values"); err != nil {
		scope.UpdateErr("ParseSingleSequenceExample", err)
		return
	}
	if context_sparse_shapes, idx, err = makeOutputList(op, idx, "context_sparse_shapes"); err != nil {
		scope.UpdateErr("ParseSingleSequenceExample", err)
		return
	}
	if context_dense_values, idx, err = makeOutputList(op, idx, "context_dense_values"); err != nil {
		scope.UpdateErr("ParseSingleSequenceExample", err)
		return
	}
	if feature_list_sparse_indices, idx, err = makeOutputList(op, idx, "feature_list_sparse_indices"); err != nil {
		scope.UpdateErr("ParseSingleSequenceExample", err)
		return
	}
	if feature_list_sparse_values, idx, err = makeOutputList(op, idx, "feature_list_sparse_values"); err != nil {
		scope.UpdateErr("ParseSingleSequenceExample", err)
		return
	}
	if feature_list_sparse_shapes, idx, err = makeOutputList(op, idx, "feature_list_sparse_shapes"); err != nil {
		scope.UpdateErr("ParseSingleSequenceExample", err)
		return
	}
	if feature_list_dense_values, idx, err = makeOutputList(op, idx, "feature_list_dense_values"); err != nil {
		scope.UpdateErr("ParseSingleSequenceExample", err)
		return
	}
	return context_sparse_indices, context_sparse_values, context_sparse_shapes, context_dense_values, feature_list_sparse_indices, feature_list_sparse_values, feature_list_sparse_shapes, feature_list_dense_values
}

// Check if the input matches the regex pattern.
//
// The input is a string tensor of any shape. The pattern is the
// regular expression to be matched with every element of the input tensor.
// The boolean values (True or False) of the output tensor indicate
// if the input matches the regex pattern provided.
//
// The pattern follows the re2 syntax (https://github.com/google/re2/wiki/Syntax).
//
// Arguments:
//	input: A string tensor of the text to be processed.
//	pattern: The regular expression to match the input.
//
// Returns A bool tensor with the same shape as `input`.
func StaticRegexFullMatch(scope *Scope, input tf.Output, pattern string) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"pattern": pattern}
	opspec := tf.OpSpec{
		Type: "StaticRegexFullMatch",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

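// A matching sketch (helper name and pattern ours): full match means the
// pattern must cover the entire string, so "abc123" matches while
// "no digits here" does not.
func exampleRegexFullMatch(s *Scope) tf.Output {
	inputs := Const(s, []string{"abc123", "no digits here"})
	return StaticRegexFullMatch(s, inputs, `[a-z]+[0-9]+`)
}
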
// Computes the number of elements in the given table.
//
// Arguments:
//	table_handle: Handle to the table.
//
// Returns Scalar that contains number of elements in the table.
func LookupTableSizeV2(scope *Scope, table_handle tf.Output) (size tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "LookupTableSizeV2",
		Input: []tf.Input{
			table_handle,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

17501// Computes inverse hyperbolic sine of x element-wise.
17502//
17503//   Given an input tensor, this function computes inverse hyperbolic sine
17504//   for every element in the tensor. Both input and output have a range of
17505//   `[-inf, inf]`.
17506//
17507//   ```python
17508//   x = tf.constant([-float("inf"), -2, -0.5, 1, 1.2, 200, 10000, float("inf")])
17509//   tf.math.asinh(x) ==> [-inf -1.4436355 -0.4812118 0.8813736 1.0159732 5.991471 9.903487 inf]
17510//   ```
17511func Asinh(scope *Scope, x tf.Output) (y tf.Output) {
17512	if scope.Err() != nil {
17513		return
17514	}
17515	opspec := tf.OpSpec{
17516		Type: "Asinh",
17517		Input: []tf.Input{
17518			x,
17519		},
17520	}
17521	op := scope.AddOperation(opspec)
17522	return op.Output(0)
17523}
17524
17525// Looks up keys in a table, outputs the corresponding values.
17526//
17527// The tensor `keys` must be of the same type as the keys of the table.
17528// The output `values` is of the type of the table values.
17529//
17530// The scalar `default_value` is the value output for keys not present in the
17531// table. It must also be of the same type as the table values.
17532//
17533// Arguments:
17534//	table_handle: Handle to the table.
17535//	keys: Any shape.  Keys to look up.
17536//
17537//
17538// Returns Same shape as `keys`.  Values found in the table, or `default_value`
17539// for missing keys.
17540func LookupTableFindV2(scope *Scope, table_handle tf.Output, keys tf.Output, default_value tf.Output) (values tf.Output) {
17541	if scope.Err() != nil {
17542		return
17543	}
17544	opspec := tf.OpSpec{
17545		Type: "LookupTableFindV2",
17546		Input: []tf.Input{
17547			table_handle, keys, default_value,
17548		},
17549	}
17550	op := scope.AddOperation(opspec)
17551	return op.Output(0)
17552}
17553
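// A minimal client-side sketch of a full lookup flow. It assumes the
// HashTableV2 and LookupTableImportV2 wrappers generated elsewhere in this
// package, and a Scope s from op.NewScope(); keys and values are hypothetical.
//
// ```go
// table := op.HashTableV2(s, tf.String, tf.Int64)
// initOp := op.LookupTableImportV2(s, table,
// 	op.Const(s, []string{"a", "b"}), op.Const(s, []int64{1, 2}))
// vals := op.LookupTableFindV2(s, table,
// 	op.Const(s, []string{"a", "z"}), op.Const(s, int64(-1)))
// // Run initOp as a target once; fetching vals then yields [1, -1].
// ```
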
17554// MaxPoolGradAttr is an optional argument to MaxPoolGrad.
17555type MaxPoolGradAttr func(optionalAttr)
17556
17557// MaxPoolGradExplicitPaddings sets the optional explicit_paddings attribute to value.
17558// If not specified, defaults to an empty list.
17559func MaxPoolGradExplicitPaddings(value []int64) MaxPoolGradAttr {
17560	return func(m optionalAttr) {
17561		m["explicit_paddings"] = value
17562	}
17563}
17564
17565// MaxPoolGradDataFormat sets the optional data_format attribute to value.
17566//
17567// value: Specify the data format of the input and output data. With the
17568// default format "NHWC", the data is stored in the order of:
17569//     [batch, in_height, in_width, in_channels].
17570// Alternatively, the format could be "NCHW", the data storage order of:
17571//     [batch, in_channels, in_height, in_width].
17572// If not specified, defaults to "NHWC"
17573func MaxPoolGradDataFormat(value string) MaxPoolGradAttr {
17574	return func(m optionalAttr) {
17575		m["data_format"] = value
17576	}
17577}
17578
17579// Computes gradients of the maxpooling function.
17580//
17581// Arguments:
17582//	orig_input: The original input tensor.
17583//	orig_output: The original output tensor.
17584//	grad: 4-D.  Gradients w.r.t. the output of `max_pool`.
17585//	ksize: The size of the window for each dimension of the input tensor.
17586//	strides: The stride of the sliding window for each dimension of the
17587// input tensor.
17588//	padding: The type of padding algorithm to use.
17589//
17590// Returns Gradients w.r.t. the input to `max_pool`.
17591func MaxPoolGrad(scope *Scope, orig_input tf.Output, orig_output tf.Output, grad tf.Output, ksize []int64, strides []int64, padding string, optional ...MaxPoolGradAttr) (output tf.Output) {
17592	if scope.Err() != nil {
17593		return
17594	}
17595	attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
17596	for _, a := range optional {
17597		a(attrs)
17598	}
17599	opspec := tf.OpSpec{
17600		Type: "MaxPoolGrad",
17601		Input: []tf.Input{
17602			orig_input, orig_output, grad,
17603		},
17604		Attrs: attrs,
17605	}
17606	op := scope.AddOperation(opspec)
17607	return op.Output(0)
17608}
17609
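// Each MaxPoolGradAttr above is a closure that writes a single entry into the
// attrs map before the OpSpec is added to the graph. A minimal sketch of
// passing one (the tensors are hypothetical placeholders; only the call shape
// matters):
//
// ```go
// out := op.MaxPoolGrad(s, origInput, origOutput, grad,
// 	[]int64{1, 2, 2, 1}, []int64{1, 2, 2, 1}, "VALID",
// 	op.MaxPoolGradDataFormat("NHWC"))
// ```
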
17610// Rolls the elements of a tensor along an axis.
17611//
17612// The elements are shifted positively (towards larger indices) by the offset of
17613// `shift` along the dimension of `axis`. Negative `shift` values will shift
17614// elements in the opposite direction. Elements that roll past the last position
17615// will wrap around to the first and vice versa. Multiple shifts along multiple
17616// axes may be specified.
17617//
17618// For example:
17619//
17620// ```
17621// # 't' is [0, 1, 2, 3, 4]
17622// roll(t, shift=2, axis=0) ==> [3, 4, 0, 1, 2]
17623//
17624// # shifting along multiple dimensions
17625// # 't' is [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]
17626// roll(t, shift=[1, -2], axis=[0, 1]) ==> [[7, 8, 9, 5, 6], [2, 3, 4, 0, 1]]
17627//
17628// # shifting along the same axis multiple times
17629// # 't' is [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]
17630// roll(t, shift=[2, -3], axis=[1, 1]) ==> [[1, 2, 3, 4, 0], [6, 7, 8, 9, 5]]
17631// ```
17632//
17633// Arguments:
17634//
17635//	shift: Dimension must be 0-D or 1-D. `shift[i]` specifies the number of places by which
17636// elements are shifted positively (towards larger indices) along the dimension
17637// specified by `axis[i]`. Negative shifts will roll the elements in the opposite
17638// direction.
17639//	axis: Dimension must be 0-D or 1-D. `axis[i]` specifies the dimension in which the shift
17640// `shift[i]` should occur. If the same axis is referenced more than once, the
17641// total shift for that axis will be the sum of all the shifts that belong to that
17642// axis.
17643//
17644// Returns Has the same shape and size as the input. The elements are shifted
17645// positively (towards larger indices) by the offsets of `shift` along the
17646// dimensions of `axis`.
17647func Roll(scope *Scope, input tf.Output, shift tf.Output, axis tf.Output) (output tf.Output) {
17648	if scope.Err() != nil {
17649		return
17650	}
17651	opspec := tf.OpSpec{
17652		Type: "Roll",
17653		Input: []tf.Input{
17654			input, shift, axis,
17655		},
17656	}
17657	op := scope.AddOperation(opspec)
17658	return op.Output(0)
17659}
17660
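// A minimal client-side sketch mirroring the first example above (graph and
// session plumbing omitted; assumes a Scope s from op.NewScope()):
//
// ```go
// t := op.Const(s, []int32{0, 1, 2, 3, 4})
// rolled := op.Roll(s, t, op.Const(s, int32(2)), op.Const(s, int32(0)))
// // rolled evaluates to [3, 4, 0, 1, 2]
// ```
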
17661// OrderedMapUnstageAttr is an optional argument to OrderedMapUnstage.
17662type OrderedMapUnstageAttr func(optionalAttr)
17663
17664// OrderedMapUnstageCapacity sets the optional capacity attribute to value.
17665// If not specified, defaults to 0
17666//
17667// REQUIRES: value >= 0
17668func OrderedMapUnstageCapacity(value int64) OrderedMapUnstageAttr {
17669	return func(m optionalAttr) {
17670		m["capacity"] = value
17671	}
17672}
17673
17674// OrderedMapUnstageMemoryLimit sets the optional memory_limit attribute to value.
17675// If not specified, defaults to 0
17676//
17677// REQUIRES: value >= 0
17678func OrderedMapUnstageMemoryLimit(value int64) OrderedMapUnstageAttr {
17679	return func(m optionalAttr) {
17680		m["memory_limit"] = value
17681	}
17682}
17683
17684// OrderedMapUnstageContainer sets the optional container attribute to value.
17685// If not specified, defaults to ""
17686func OrderedMapUnstageContainer(value string) OrderedMapUnstageAttr {
17687	return func(m optionalAttr) {
17688		m["container"] = value
17689	}
17690}
17691
17692// OrderedMapUnstageSharedName sets the optional shared_name attribute to value.
17693// If not specified, defaults to ""
17694func OrderedMapUnstageSharedName(value string) OrderedMapUnstageAttr {
17695	return func(m optionalAttr) {
17696		m["shared_name"] = value
17697	}
17698}
17699
17700// Op removes and returns the values associated with the key
17701//
17702// from the underlying container. If the underlying container
17703// does not contain this key, the op will block until it does.
17704func OrderedMapUnstage(scope *Scope, key tf.Output, indices tf.Output, dtypes []tf.DataType, optional ...OrderedMapUnstageAttr) (values []tf.Output) {
17705	if scope.Err() != nil {
17706		return
17707	}
17708	attrs := map[string]interface{}{"dtypes": dtypes}
17709	for _, a := range optional {
17710		a(attrs)
17711	}
17712	opspec := tf.OpSpec{
17713		Type: "OrderedMapUnstage",
17714		Input: []tf.Input{
17715			key, indices,
17716		},
17717		Attrs: attrs,
17718	}
17719	op := scope.AddOperation(opspec)
17720	if scope.Err() != nil {
17721		return
17722	}
17723	var idx int
17724	var err error
17725	if values, idx, err = makeOutputList(op, idx, "values"); err != nil {
17726		scope.UpdateErr("OrderedMapUnstage", err)
17727		return
17728	}
17729	return values
17730}
17731
17732// SobolSampleAttr is an optional argument to SobolSample.
17733type SobolSampleAttr func(optionalAttr)
17734
17735// SobolSampleDtype sets the optional dtype attribute to value.
17736//
17737// value: The type of the sample. One of: `float32` or `float64`.
17738// If not specified, defaults to DT_FLOAT
17739func SobolSampleDtype(value tf.DataType) SobolSampleAttr {
17740	return func(m optionalAttr) {
17741		m["dtype"] = value
17742	}
17743}
17744
17745// Generates points from the Sobol sequence.
17746//
17747// Creates a Sobol sequence with `num_results` samples. Each sample has dimension
17748// `dim`. Skips the first `skip` samples.
17749//
17750// Arguments:
17751//	dim: Positive scalar `Tensor` representing each sample's dimension.
17752//	num_results: Positive scalar `Tensor` of dtype int32. The number of Sobol points to return
17753// in the output.
17754//	skip: Positive scalar `Tensor` of dtype int32. The number of initial points of the
17755// Sobol sequence to skip.
17756//
17757// Returns `Tensor` of samples from Sobol sequence with `shape` [num_results, dim].
17758func SobolSample(scope *Scope, dim tf.Output, num_results tf.Output, skip tf.Output, optional ...SobolSampleAttr) (samples tf.Output) {
17759	if scope.Err() != nil {
17760		return
17761	}
17762	attrs := map[string]interface{}{}
17763	for _, a := range optional {
17764		a(attrs)
17765	}
17766	opspec := tf.OpSpec{
17767		Type: "SobolSample",
17768		Input: []tf.Input{
17769			dim, num_results, skip,
17770		},
17771		Attrs: attrs,
17772	}
17773	op := scope.AddOperation(opspec)
17774	return op.Output(0)
17775}
17776
17777// QuantizedReluAttr is an optional argument to QuantizedRelu.
17778type QuantizedReluAttr func(optionalAttr)
17779
17780// QuantizedReluOutType sets the optional out_type attribute to value.
17781// If not specified, defaults to DT_QUINT8
17782func QuantizedReluOutType(value tf.DataType) QuantizedReluAttr {
17783	return func(m optionalAttr) {
17784		m["out_type"] = value
17785	}
17786}
17787
17788// Computes Quantized Rectified Linear: `max(features, 0)`
17789//
17790// Arguments:
17791//
17792//	min_features: The float value that the lowest quantized value represents.
17793//	max_features: The float value that the highest quantized value represents.
17794//
17795// Returns:
17796//	activations: Has the same output shape as "features".
17797//	min_activations: The float value that the lowest quantized value represents.
17798//	max_activations: The float value that the highest quantized value represents.
17799func QuantizedRelu(scope *Scope, features tf.Output, min_features tf.Output, max_features tf.Output, optional ...QuantizedReluAttr) (activations tf.Output, min_activations tf.Output, max_activations tf.Output) {
17800	if scope.Err() != nil {
17801		return
17802	}
17803	attrs := map[string]interface{}{}
17804	for _, a := range optional {
17805		a(attrs)
17806	}
17807	opspec := tf.OpSpec{
17808		Type: "QuantizedRelu",
17809		Input: []tf.Input{
17810			features, min_features, max_features,
17811		},
17812		Attrs: attrs,
17813	}
17814	op := scope.AddOperation(opspec)
17815	return op.Output(0), op.Output(1), op.Output(2)
17816}
17817
17818// Returns the next representable value of `x1` in the direction of `x2`, element-wise.
17819//
17820// This operation returns the same result as the C++ std::nextafter function.
17821//
17822// It can also return a subnormal number.
17823//
17824// @compatibility(cpp)
17825// Equivalent to C++ std::nextafter function.
17826// @end_compatibility
17827func NextAfter(scope *Scope, x1 tf.Output, x2 tf.Output) (output tf.Output) {
17828	if scope.Err() != nil {
17829		return
17830	}
17831	opspec := tf.OpSpec{
17832		Type: "NextAfter",
17833		Input: []tf.Input{
17834			x1, x2,
17835		},
17836	}
17837	op := scope.AddOperation(opspec)
17838	return op.Output(0)
17839}
17840
17841// Bucketizes 'input' based on 'boundaries'.
17842//
17843// For example, if the inputs are
17844//     boundaries = [0, 10, 100]
17845//     input = [[-5, 10000]
17846//              [150,   10]
17847//              [5,    100]]
17848//
17849// then the output will be
17850//     output = [[0, 3]
17851//               [3, 2]
17852//               [1, 3]]
17853//
17854// Arguments:
17855//	input: A `Tensor` of int or float type, of any shape.
17856//	boundaries: A sorted list of floats giving the boundaries of the buckets.
17857//
17858// Returns Same shape as 'input', with each value of input replaced by its bucket index.
17859//
17860// @compatibility(numpy)
17861// Equivalent to np.digitize.
17862// @end_compatibility
17863func Bucketize(scope *Scope, input tf.Output, boundaries []float32) (output tf.Output) {
17864	if scope.Err() != nil {
17865		return
17866	}
17867	attrs := map[string]interface{}{"boundaries": boundaries}
17868	opspec := tf.OpSpec{
17869		Type: "Bucketize",
17870		Input: []tf.Input{
17871			input,
17872		},
17873		Attrs: attrs,
17874	}
17875	op := scope.AddOperation(opspec)
17876	return op.Output(0)
17877}
17878
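// A minimal client-side sketch mirroring the example above (plumbing omitted;
// assumes a Scope s from op.NewScope()):
//
// ```go
// in := op.Const(s, [][]float32{{-5, 10000}, {150, 10}, {5, 100}})
// buckets := op.Bucketize(s, in, []float32{0, 10, 100})
// // buckets evaluates to [[0, 3], [3, 2], [1, 3]]
// ```
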
17879// LoadTPUEmbeddingFrequencyEstimatorParametersGradAccumDebugAttr is an optional argument to LoadTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug.
17880type LoadTPUEmbeddingFrequencyEstimatorParametersGradAccumDebugAttr func(optionalAttr)
17881
17882// LoadTPUEmbeddingFrequencyEstimatorParametersGradAccumDebugTableId sets the optional table_id attribute to value.
17883// If not specified, defaults to -1
17884func LoadTPUEmbeddingFrequencyEstimatorParametersGradAccumDebugTableId(value int64) LoadTPUEmbeddingFrequencyEstimatorParametersGradAccumDebugAttr {
17885	return func(m optionalAttr) {
17886		m["table_id"] = value
17887	}
17888}
17889
17890// LoadTPUEmbeddingFrequencyEstimatorParametersGradAccumDebugTableName sets the optional table_name attribute to value.
17891// If not specified, defaults to ""
17892func LoadTPUEmbeddingFrequencyEstimatorParametersGradAccumDebugTableName(value string) LoadTPUEmbeddingFrequencyEstimatorParametersGradAccumDebugAttr {
17893	return func(m optionalAttr) {
17894		m["table_name"] = value
17895	}
17896}
17897
17898// LoadTPUEmbeddingFrequencyEstimatorParametersGradAccumDebugConfig sets the optional config attribute to value.
17899// If not specified, defaults to ""
17900func LoadTPUEmbeddingFrequencyEstimatorParametersGradAccumDebugConfig(value string) LoadTPUEmbeddingFrequencyEstimatorParametersGradAccumDebugAttr {
17901	return func(m optionalAttr) {
17902		m["config"] = value
17903	}
17904}
17905
17906// Load frequency estimator embedding parameters with debug support.
17907//
17908// An op that loads optimization parameters into HBM for embedding. Must be
17909// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
17910// embedding table configuration. For example, this op is used to install
17911// parameters that are loaded from a checkpoint before a training loop is
17912// executed.
17913//
17914// Arguments:
17915//	parameters: Value of parameters used in the frequency estimator optimization algorithm.
17916//	last_hit_step: Value of last_hit_step used in the frequency estimator optimization algorithm.
17917//	gradient_accumulators: Value of gradient_accumulators used in the frequency estimator optimization
17918// algorithm.
17919//
17920//
17921//
17922// Returns the created operation.
17923func LoadTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug(scope *Scope, parameters tf.Output, last_hit_step tf.Output, gradient_accumulators tf.Output, num_shards int64, shard_id int64, optional ...LoadTPUEmbeddingFrequencyEstimatorParametersGradAccumDebugAttr) (o *tf.Operation) {
17924	if scope.Err() != nil {
17925		return
17926	}
17927	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
17928	for _, a := range optional {
17929		a(attrs)
17930	}
17931	opspec := tf.OpSpec{
17932		Type: "LoadTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug",
17933		Input: []tf.Input{
17934			parameters, last_hit_step, gradient_accumulators,
17935		},
17936		Attrs: attrs,
17937	}
17938	return scope.AddOperation(opspec)
17939}
17940
17941// Computes the log of the absolute value of `Gamma(x)` element-wise.
17942//
17943//   For positive integers, this function computes log((input - 1)!) for every element in the tensor.
17944//   `lgamma(5) = log((5-1)!) = log(4!) = log(24) = 3.1780539`
17945//
17946// Example:
17947//
17948// ```python
17949// x = tf.constant([0, 0.5, 1, 4.5, -4, -5.6])
17950// tf.math.lgamma(x) ==> [inf, 0.5723649, 0., 2.4537368, inf, -4.6477685]
17951// ```
17952func Lgamma(scope *Scope, x tf.Output) (y tf.Output) {
17953	if scope.Err() != nil {
17954		return
17955	}
17956	opspec := tf.OpSpec{
17957		Type: "Lgamma",
17958		Input: []tf.Input{
17959			x,
17960		},
17961	}
17962	op := scope.AddOperation(opspec)
17963	return op.Output(0)
17964}
17965
17966// Reads the value of a variable.
17967//
17968// The tensor returned by this operation is immutable.
17969//
17970// The value returned by this operation is guaranteed to be influenced by all the
17971// writes on which this operation depends directly or indirectly, and to not be
17972// influenced by any of the writes which depend directly or indirectly on this
17973// operation.
17974//
17975// Arguments:
17976//	resource: handle to the resource in which to store the variable.
17977//	dtype: the dtype of the value.
17978func ReadVariableOp(scope *Scope, resource tf.Output, dtype tf.DataType) (value tf.Output) {
17979	if scope.Err() != nil {
17980		return
17981	}
17982	attrs := map[string]interface{}{"dtype": dtype}
17983	opspec := tf.OpSpec{
17984		Type: "ReadVariableOp",
17985		Input: []tf.Input{
17986			resource,
17987		},
17988		Attrs: attrs,
17989	}
17990	op := scope.AddOperation(opspec)
17991	return op.Output(0)
17992}
17993
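// A minimal client-side sketch of the resource-variable flow. It assumes the
// VarHandleOp and AssignVariableOp wrappers generated elsewhere in this
// package, and a Scope s from op.NewScope():
//
// ```go
// v := op.VarHandleOp(s, tf.Float, tf.ScalarShape())
// assign := op.AssignVariableOp(s, v, op.Const(s, float32(3)))
// val := op.ReadVariableOp(s, v, tf.Float)
// // Run assign as a target before fetching val; val then yields 3.0.
// ```
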
17994// Computes a range that covers the actual values present in a quantized tensor.
17995//
17996// Given a quantized tensor described by `(input, input_min, input_max)`, outputs a
17997// range that covers the actual values present in that tensor. This op is typically
17998// used to produce the `requested_output_min` and `requested_output_max` for
17999// `Requantize`.
18000//
18001// Arguments:
18002//
18003//	input_min: The float value that the minimum quantized input value represents.
18004//	input_max: The float value that the maximum quantized input value represents.
18005//
18006// Returns:
18007//	output_min: The computed min output.
18008//	output_max: The computed max output.
18009func RequantizationRange(scope *Scope, input tf.Output, input_min tf.Output, input_max tf.Output) (output_min tf.Output, output_max tf.Output) {
18010	if scope.Err() != nil {
18011		return
18012	}
18013	opspec := tf.OpSpec{
18014		Type: "RequantizationRange",
18015		Input: []tf.Input{
18016			input, input_min, input_max,
18017		},
18018	}
18019	op := scope.AddOperation(opspec)
18020	return op.Output(0), op.Output(1)
18021}
18022
18023// TPUPartitionedInputAttr is an optional argument to TPUPartitionedInput.
18024type TPUPartitionedInputAttr func(optionalAttr)
18025
18026// TPUPartitionedInputPartitionDim sets the optional partition_dim attribute to value.
18027//
18028// value: An integer describing which dimension is partitioned. -1 means
18029// those inputs are replicated.
18030// If not specified, defaults to 0
18031func TPUPartitionedInputPartitionDim(value int64) TPUPartitionedInputAttr {
18032	return func(m optionalAttr) {
18033		m["partition_dim"] = value
18034	}
18035}
18036
18037// An op that groups a list of partitioned inputs together.
18038//
18039// Arguments:
18040//	inputs: A list of partitioned inputs which must have the same shape.
18041//
18042// Returns A handle which represents the full shape of partitioned tensors.
18043func TPUPartitionedInput(scope *Scope, inputs []tf.Output, optional ...TPUPartitionedInputAttr) (output tf.Output) {
18044	if scope.Err() != nil {
18045		return
18046	}
18047	attrs := map[string]interface{}{}
18048	for _, a := range optional {
18049		a(attrs)
18050	}
18051	opspec := tf.OpSpec{
18052		Type: "TPUPartitionedInput",
18053		Input: []tf.Input{
18054			tf.OutputList(inputs),
18055		},
18056		Attrs: attrs,
18057	}
18058	op := scope.AddOperation(opspec)
18059	return op.Output(0)
18060}
18061
18062// Compare values of `input` to `threshold` and pack resulting bits into a `uint8`.
18063//
18064// Each comparison returns a boolean `true` (if `input_value > threshold`)
18065// or `false` otherwise.
18066//
18067// This operation is useful for Locality-Sensitive-Hashing (LSH) and other
18068// algorithms that use hashing approximations of cosine and `L2` distances;
18069// codes can be generated from an input via:
18070//
18071// ```python
18072// codebook_size = 50
18073// codebook_bits = codebook_size * 32
18074// codebook = tf.get_variable('codebook', [x.shape[-1].value, codebook_bits],
18075//                            dtype=x.dtype,
18076//                            initializer=tf.orthogonal_initializer())
18077// codes = compare_and_threshold(tf.matmul(x, codebook), threshold=0.)
18078// codes = tf.bitcast(codes, tf.int32)  # go from uint8 to int32
18079// # now codes has shape x.shape[:-1] + [codebook_size]
18080// ```
18081//
18082// **NOTE**: Currently, the innermost dimension of the tensor must be divisible
18083// by 8.
18084//
18085// Given an `input` shaped `[s0, s1, ..., s_n]`, the output is
18086// a `uint8` tensor shaped `[s0, s1, ..., s_n / 8]`.
18087//
18088// Arguments:
18089//	input: Values to compare against `threshold` and bitpack.
18090//	threshold: Threshold to compare against.
18091//
18092// Returns The bitpacked comparisons.
18093func CompareAndBitpack(scope *Scope, input tf.Output, threshold tf.Output) (output tf.Output) {
18094	if scope.Err() != nil {
18095		return
18096	}
18097	opspec := tf.OpSpec{
18098		Type: "CompareAndBitpack",
18099		Input: []tf.Input{
18100			input, threshold,
18101		},
18102	}
18103	op := scope.AddOperation(opspec)
18104	return op.Output(0)
18105}
18106
18107// Tensor contraction according to Einstein summation convention.
18108//
18109// Implements generalized Tensor contraction and reduction. Each input Tensor must
18110// have a corresponding input subscript appearing in the comma-separated left-hand
18111// side of the equation. The right-hand side of the equation consists of the
18112// output subscript. The input subscripts and the output subscript should consist
18113// of zero or more named axis labels and at most one ellipsis (`...`).
18114//
18115// The named axis labels may be any single character other than those having
18116// special meaning, namely `,.->`. The behavior of this Op is undefined if it
18117// receives an ill-formatted equation; since the validation is done at
18118// graph-building time, we omit format validation checks at runtime.
18119//
18120// Note: This Op is *not* intended to be called by the user; instead users should
18121// call `tf.einsum` directly. It is a hidden Op used by `tf.einsum`.
18122//
18123// Operations are applied to the input(s) according to the following rules:
18124//
18125//  (a) Generalized Diagonals: For input dimensions corresponding to axis labels
18126//      appearing more than once in the same input subscript, we take the
18127//      generalized (`k`-dimensional) diagonal.
18128//      For example, in the equation `iii->i` with input shape `[3, 3, 3]`, the
18129//      generalized diagonal would consist of `3` elements at indices `(0, 0, 0)`,
18130//      `(1, 1, 1)` and `(2, 2, 2)` to create a Tensor of shape `[3]`.
18131//
18132//  (b) Reduction: Axes corresponding to labels appearing only in one input
18133//      subscript but not in the output subscript are summed over prior to Tensor
18134//      contraction.
18135//      For example, in the equation `ab,bc->b`, the axis labels `a` and `c` are
18136//      the reduction axis labels.
18137//
18138//  (c) Batch Dimensions: Axes corresponding to labels appearing in each of the
18139//      input subscripts and also in the output subscript make up the batch
18140//      dimensions in Tensor contraction. Unnamed axis labels corresponding to
18141//      ellipsis (`...`) also correspond to batch dimensions.
18142//      For example, for the equation denoting batch matrix multiplication,
18143//      `bij,bjk->bik`, the axis label `b` corresponds to a batch dimension.
18144//
18145//  (d) Contraction: In case of binary einsum, axes corresponding to labels
18146//      appearing in two different inputs (and not in the output) are contracted
18147//      against each other.
18148//      Considering the batch matrix multiplication equation again
18149//      (`bij,bjk->bik`), the contracted axis label is `j`.
18150//
18151//  (e) Expand Diagonal: If the output subscripts contain repeated (explicit) axis
18152//      labels, the opposite operation of (a) is applied. For example, in the
18153//      equation `i->iii`, and input shape `[3]`, the output of shape `[3, 3, 3]`
18154//      are all zeros, except for the (generalized) diagonal which is populated
18155//      with values from the input.
18156//      Note: This operation is not supported by `np.einsum` or `tf.einsum`; it is
18157//      provided to enable computing the symbolic gradient of `tf.einsum`.
18158//
18159// The output subscripts must contain only labels appearing in at least one of the
18160// input subscripts. Furthermore, all dimensions mapping to the same axis label
18161// must be equal.
18162//
18163// Any of the input and output subscripts may contain at most a single ellipsis
18164// (`...`). These ellipses are mapped against dimensions not corresponding to any
18165// named axis label. If two inputs contain an ellipsis, then they are broadcasted
18166// according to standard NumPy broadcasting
18167// [rules](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html).
18168//
18169// The broadcasted dimensions are placed in the corresponding location of the
18170// ellipsis in the output subscript. If the broadcasted dimensions are non-empty
18171// and the output subscripts do not contain ellipsis, then an InvalidArgument error
18172// is raised.
18173//
18174// @compatibility(numpy)
18175// Similar to [`numpy.einsum`](https://docs.scipy.org/doc/numpy/reference/generated/numpy.einsum.html).
18176//
18177// Comparison with `numpy.einsum`:
18178//
18179//  * This Op only supports unary and binary forms of `numpy.einsum`.
18180//  * This Op does not support the implicit form (i.e., equations without `->`).
18181//  * This Op also supports repeated indices in the output subscript, which is not
18182//    supported by `numpy.einsum`.
18183// @end_compatibility
18184//
18185//
18186// Arguments:
18187//	inputs: List of 1 or 2 Tensors.
18188//	equation: String describing the Einstein Summation operation; in the format of np.einsum.
18189//
18190// Returns Output Tensor with shape depending upon `equation`.
18191func Einsum(scope *Scope, inputs []tf.Output, equation string) (output tf.Output) {
18192	if scope.Err() != nil {
18193		return
18194	}
18195	attrs := map[string]interface{}{"equation": equation}
18196	opspec := tf.OpSpec{
18197		Type: "Einsum",
18198		Input: []tf.Input{
18199			tf.OutputList(inputs),
18200		},
18201		Attrs: attrs,
18202	}
18203	op := scope.AddOperation(opspec)
18204	return op.Output(0)
18205}
18206
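// A minimal client-side sketch of the batch matrix multiplication equation
// described above (plumbing omitted; assumes a Scope s from op.NewScope(),
// with hypothetical shapes):
//
// ```go
// a := op.Const(s, [][][]float32{{{1, 2}, {3, 4}}}) // shape [1, 2, 2]
// b := op.Const(s, [][][]float32{{{5, 6}, {7, 8}}}) // shape [1, 2, 2]
// c := op.Einsum(s, []tf.Output{a, b}, "bij,bjk->bik")
// // c evaluates to [[[19, 22], [43, 50]]]
// ```
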
18207// Convert the quantized 'input' tensor into a lower-precision 'output', using the
18208//
18209// actual distribution of the values to maximize the usage of the lower bit depth
18210// and adjusting the output min and max ranges accordingly.
18211//
18212// [input_min, input_max] are scalar floats that specify the range for the float
18213// interpretation of the 'input' data. For example, if input_min is -1.0f and
18214// input_max is 1.0f, and we are dealing with quint16 quantized data, then a 0
18215// value in the 16-bit data should be interpreted as -1.0f, and a 65535 means 1.0f.
18216//
18217// This operator tries to squeeze as much precision as possible into an output with
18218// a lower bit depth by calculating the actual min and max values found in the
18219// data. For example, maybe that quint16 input has no values lower than 16,384 and
18220// none higher than 49,152. That means only half the range is actually needed, all
18221// the float interpretations are between -0.5f and 0.5f, so if we want to compress
18222// the data into a quint8 output, we can use that range rather than the theoretical
18223// -1.0f to 1.0f that is suggested by the input min and max.
18224//
18225// In practice, this is most useful for taking output from operations like
18226// QuantizedMatMul that can produce higher bit-depth outputs than their inputs and
18227// may have large potential output ranges, but in practice have a distribution of
18228// input values that only uses a small fraction of the possible range. By feeding
18229// that output into this operator, we can reduce it from 32 bits down to 8 with
18230// minimal loss of accuracy.
18231//
18232// Arguments:
18233//
18234//	input_min: The float value that the minimum quantized input value represents.
18235//	input_max: The float value that the maximum quantized input value represents.
18236//	out_type: The type of the output. Should be a lower bit depth than Tinput.
18237//
18238// Returns:
18239//	output
18240//	output_min: The float value that the minimum quantized output value represents.
18241//	output_max: The float value that the maximum quantized output value represents.
18242func QuantizeDownAndShrinkRange(scope *Scope, input tf.Output, input_min tf.Output, input_max tf.Output, out_type tf.DataType) (output tf.Output, output_min tf.Output, output_max tf.Output) {
18243	if scope.Err() != nil {
18244		return
18245	}
18246	attrs := map[string]interface{}{"out_type": out_type}
18247	opspec := tf.OpSpec{
18248		Type: "QuantizeDownAndShrinkRange",
18249		Input: []tf.Input{
18250			input, input_min, input_max,
18251		},
18252		Attrs: attrs,
18253	}
18254	op := scope.AddOperation(opspec)
18255	return op.Output(0), op.Output(1), op.Output(2)
18256}
18257
18258// Converts each string in the input Tensor to its hash modulo a number of buckets.
18259//
18260// The hash function is deterministic on the content of the string within the
18261// process.
18262//
18263// Note that the hash function may change from time to time.
18264// This functionality will be deprecated; it's recommended to use
18265// `tf.string_to_hash_bucket_fast()` or `tf.string_to_hash_bucket_strong()`.
18266//
18267// Arguments:
18268//
18269//	num_buckets: The number of buckets.
18270//
18271// Returns A Tensor of the same shape as the input `string_tensor`.
18272func StringToHashBucket(scope *Scope, string_tensor tf.Output, num_buckets int64) (output tf.Output) {
18273	if scope.Err() != nil {
18274		return
18275	}
18276	attrs := map[string]interface{}{"num_buckets": num_buckets}
18277	opspec := tf.OpSpec{
18278		Type: "StringToHashBucket",
18279		Input: []tf.Input{
18280			string_tensor,
18281		},
18282		Attrs: attrs,
18283	}
18284	op := scope.AddOperation(opspec)
18285	return op.Output(0)
18286}
18287
18288// Computes softsign: `features / (abs(features) + 1)`.
18289func Softsign(scope *Scope, features tf.Output) (activations tf.Output) {
18290	if scope.Err() != nil {
18291		return
18292	}
18293	opspec := tf.OpSpec{
18294		Type: "Softsign",
18295		Input: []tf.Input{
18296			features,
18297		},
18298	}
18299	op := scope.AddOperation(opspec)
18300	return op.Output(0)
18301}
18302
18303// QuantizedAddAttr is an optional argument to QuantizedAdd.
18304type QuantizedAddAttr func(optionalAttr)
18305
18306// QuantizedAddToutput sets the optional Toutput attribute to value.
18307// If not specified, defaults to DT_QINT32
18308func QuantizedAddToutput(value tf.DataType) QuantizedAddAttr {
18309	return func(m optionalAttr) {
18310		m["Toutput"] = value
18311	}
18312}
18313
18314// Returns x + y element-wise, working on quantized buffers.
18315//
18316// Arguments:
18317//
18318//
18319//	min_x: The float value that the lowest quantized `x` value represents.
18320//	max_x: The float value that the highest quantized `x` value represents.
18321//	min_y: The float value that the lowest quantized `y` value represents.
18322//	max_y: The float value that the highest quantized `y` value represents.
18323//
18324// Returns:
18325//	z
18326//	min_z: The float value that the lowest quantized output value represents.
18327//	max_z: The float value that the highest quantized output value represents.
18328//
18329// *NOTE*: `QuantizedAdd` supports limited forms of broadcasting. More about
18330// broadcasting [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
18331func QuantizedAdd(scope *Scope, x tf.Output, y tf.Output, min_x tf.Output, max_x tf.Output, min_y tf.Output, max_y tf.Output, optional ...QuantizedAddAttr) (z tf.Output, min_z tf.Output, max_z tf.Output) {
18332	if scope.Err() != nil {
18333		return
18334	}
18335	attrs := map[string]interface{}{}
18336	for _, a := range optional {
18337		a(attrs)
18338	}
18339	opspec := tf.OpSpec{
18340		Type: "QuantizedAdd",
18341		Input: []tf.Input{
18342			x, y, min_x, max_x, min_y, max_y,
18343		},
18344		Attrs: attrs,
18345	}
18346	op := scope.AddOperation(opspec)
18347	return op.Output(0), op.Output(1), op.Output(2)
18348}
18349
18350// ShuffleAndRepeatDatasetAttr is an optional argument to ShuffleAndRepeatDataset.
18351type ShuffleAndRepeatDatasetAttr func(optionalAttr)
18352
18353// ShuffleAndRepeatDatasetReshuffleEachIteration sets the optional reshuffle_each_iteration attribute to value.
18354// If not specified, defaults to true
18355func ShuffleAndRepeatDatasetReshuffleEachIteration(value bool) ShuffleAndRepeatDatasetAttr {
18356	return func(m optionalAttr) {
18357		m["reshuffle_each_iteration"] = value
18358	}
18359}
18360
18361// Creates a dataset that shuffles and repeats elements from `input_dataset`
18362//
18363// pseudorandomly.
18364//
18365// Arguments:
18366//
18367//	buffer_size: The number of output elements to buffer in an iterator over
18368// this dataset. Compare with the `min_after_dequeue` attr when creating a
18369// `RandomShuffleQueue`.
18370//	seed: A scalar seed for the random number generator. If either `seed` or
18371// `seed2` is set to be non-zero, the random number generator is seeded
18372// by the given seed.  Otherwise, a random seed is used.
18373//	seed2: A second scalar seed to avoid seed collision.
18374//	count: A scalar representing the number of times the underlying dataset
18375// should be repeated. The default is `-1`, which results in infinite repetition.
18376//
18377//
18378func ShuffleAndRepeatDataset(scope *Scope, input_dataset tf.Output, buffer_size tf.Output, seed tf.Output, seed2 tf.Output, count tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...ShuffleAndRepeatDatasetAttr) (handle tf.Output) {
18379	if scope.Err() != nil {
18380		return
18381	}
18382	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
18383	for _, a := range optional {
18384		a(attrs)
18385	}
18386	opspec := tf.OpSpec{
18387		Type: "ShuffleAndRepeatDataset",
18388		Input: []tf.Input{
18389			input_dataset, buffer_size, seed, seed2, count,
18390		},
18391		Attrs: attrs,
18392	}
18393	op := scope.AddOperation(opspec)
18394	return op.Output(0)
18395}
18396
18397// QuantizedMulAttr is an optional argument to QuantizedMul.
18398type QuantizedMulAttr func(optionalAttr)
18399
18400// QuantizedMulToutput sets the optional Toutput attribute to value.
18401// If not specified, defaults to DT_QINT32
18402func QuantizedMulToutput(value tf.DataType) QuantizedMulAttr {
18403	return func(m optionalAttr) {
18404		m["Toutput"] = value
18405	}
18406}
18407
18408// Returns x * y element-wise, working on quantized buffers.
18409//
18410// Arguments:
18411//
18412//
18413//	min_x: The float value that the lowest quantized `x` value represents.
18414//	max_x: The float value that the highest quantized `x` value represents.
18415//	min_y: The float value that the lowest quantized `y` value represents.
18416//	max_y: The float value that the highest quantized `y` value represents.
18417//
18418// Returns:
18419//	z
18420//	min_z: The float value that the lowest quantized output value represents.
18421//	max_z: The float value that the highest quantized output value represents.
18422//
18423// *NOTE*: `QuantizedMul` supports limited forms of broadcasting. More about
18424// broadcasting [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
18425func QuantizedMul(scope *Scope, x tf.Output, y tf.Output, min_x tf.Output, max_x tf.Output, min_y tf.Output, max_y tf.Output, optional ...QuantizedMulAttr) (z tf.Output, min_z tf.Output, max_z tf.Output) {
18426	if scope.Err() != nil {
18427		return
18428	}
18429	attrs := map[string]interface{}{}
18430	for _, a := range optional {
18431		a(attrs)
18432	}
18433	opspec := tf.OpSpec{
18434		Type: "QuantizedMul",
18435		Input: []tf.Input{
18436			x, y, min_x, max_x, min_y, max_y,
18437		},
18438		Attrs: attrs,
18439	}
18440	op := scope.AddOperation(opspec)
18441	return op.Output(0), op.Output(1), op.Output(2)
18442}
18443
18444// CumulativeLogsumexpAttr is an optional argument to CumulativeLogsumexp.
18445type CumulativeLogsumexpAttr func(optionalAttr)
18446
18447// CumulativeLogsumexpExclusive sets the optional exclusive attribute to value.
18448//
18449// value: If `True`, perform exclusive cumulative log-sum-exp.
18450// If not specified, defaults to false
18451func CumulativeLogsumexpExclusive(value bool) CumulativeLogsumexpAttr {
18452	return func(m optionalAttr) {
18453		m["exclusive"] = value
18454	}
18455}
18456
18457// CumulativeLogsumexpReverse sets the optional reverse attribute to value.
18458//
18459// value: A `bool` (default: False).
18460// If not specified, defaults to false
18461func CumulativeLogsumexpReverse(value bool) CumulativeLogsumexpAttr {
18462	return func(m optionalAttr) {
18463		m["reverse"] = value
18464	}
18465}
18466
18467// Compute the cumulative log-sum-exp of the tensor `x` along `axis`.
18468//
18469// By default, this op performs an inclusive cumulative log-sum-exp,
18470// which means that the first
18471// element of the input is identical to the first element of the output:
18472// ```python
18473// tf.math.cumulative_logsumexp([a, b, c])  # => [a, log(exp(a) + exp(b)), log(exp(a) + exp(b) + exp(c))]
18474// ```
18475//
18476// By setting the `exclusive` kwarg to `True`, an exclusive cumulative log-sum-exp is
18477// performed instead:
18478// ```python
18479// tf.cumulative_logsumexp([a, b, c], exclusive=True)  # => [-inf, a, log(exp(a) + exp(b))]
18480// ```
18481// Note that the neutral element of the log-sum-exp operation is `-inf`;
18482// however, for performance reasons, the minimal value representable by the
18483// floating point type is used instead.
18484//
18485// By setting the `reverse` kwarg to `True`, the cumulative log-sum-exp is performed in the
18486// opposite direction.
18487//
18488// Arguments:
18489//	x: A `Tensor`. Must be one of the following types: `float16`, `float32`, `float64`.
18490//	axis: A `Tensor` of type `int32` (default: 0). Must be in the range
18491// `[-rank(x), rank(x))`.
18492func CumulativeLogsumexp(scope *Scope, x tf.Output, axis tf.Output, optional ...CumulativeLogsumexpAttr) (out tf.Output) {
18493	if scope.Err() != nil {
18494		return
18495	}
18496	attrs := map[string]interface{}{}
18497	for _, a := range optional {
18498		a(attrs)
18499	}
18500	opspec := tf.OpSpec{
18501		Type: "CumulativeLogsumexp",
18502		Input: []tf.Input{
18503			x, axis,
18504		},
18505		Attrs: attrs,
18506	}
18507	op := scope.AddOperation(opspec)
18508	return op.Output(0)
18509}
18510
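// A minimal client-side sketch, including both optional attributes (plumbing
// omitted; assumes a Scope s from op.NewScope()):
//
// ```go
// x := op.Const(s, []float32{1, 2, 3})
// out := op.CumulativeLogsumexp(s, x, op.Const(s, int32(0)),
// 	op.CumulativeLogsumexpExclusive(true),
// 	op.CumulativeLogsumexpReverse(false))
// ```
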
18511// SparseBincountAttr is an optional argument to SparseBincount.
18512type SparseBincountAttr func(optionalAttr)
18513
18514// SparseBincountBinaryOutput sets the optional binary_output attribute to value.
18515//
18516// value: bool; whether the kernel should record only the presence of a value (binary output) or the number of occurrences.
18517// If not specified, defaults to false
18518func SparseBincountBinaryOutput(value bool) SparseBincountAttr {
18519	return func(m optionalAttr) {
18520		m["binary_output"] = value
18521	}
18522}
18523
18524// Counts the number of occurrences of each value in an integer array.
18525//
18526// Outputs a vector with length `size` and the same dtype as `weights`. If
18527// `weights` are empty, then index `i` stores the number of times the value `i` is
18528// counted in `values`. If `weights` are non-empty, then index `i` stores the sum of
18529// the value in `weights` at each index where the corresponding value in `values` is
18530// `i`.
18531//
18532// Values in `values` outside of the range [0, size) are ignored.
18533//
18534// Arguments:
18535//	indices: 2D int64 `Tensor`.
18536//	values: 1D int `Tensor`.
18537//	dense_shape: 1D int64 `Tensor`.
18538//	size: non-negative int scalar `Tensor`.
18539//	weights: is an int32, int64, float32, or float64 `Tensor` with the same
18540// shape as `values`, or a length-0 `Tensor`, in which case it acts as all weights
18541// equal to 1.
18542//
18543// Returns 1D `Tensor` with length equal to `size` or 2D `Tensor` with [batch_size, `size`].
18544// The counts or summed weights for each value in the range [0, size).
18545func SparseBincount(scope *Scope, indices tf.Output, values tf.Output, dense_shape tf.Output, size tf.Output, weights tf.Output, optional ...SparseBincountAttr) (output tf.Output) {
18546	if scope.Err() != nil {
18547		return
18548	}
18549	attrs := map[string]interface{}{}
18550	for _, a := range optional {
18551		a(attrs)
18552	}
18553	opspec := tf.OpSpec{
18554		Type: "SparseBincount",
18555		Input: []tf.Input{
18556			indices, values, dense_shape, size, weights,
18557		},
18558		Attrs: attrs,
18559	}
18560	op := scope.AddOperation(opspec)
18561	return op.Output(0)
18562}
18563
18564// Computes scaled exponential linear: `scale * alpha * (exp(features) - 1)`
18565//
18566// if `features` < 0, `scale * features` otherwise.
18567//
18568// To be used together with
18569// `initializer = tf.variance_scaling_initializer(factor=1.0, mode='FAN_IN')`.
18570// For correct dropout, use `tf.contrib.nn.alpha_dropout`.
18571//
18572// See [Self-Normalizing Neural Networks](https://arxiv.org/abs/1706.02515)
18573func Selu(scope *Scope, features tf.Output) (activations tf.Output) {
18574	if scope.Err() != nil {
18575		return
18576	}
18577	opspec := tf.OpSpec{
18578		Type: "Selu",
18579		Input: []tf.Input{
18580			features,
18581		},
18582	}
18583	op := scope.AddOperation(opspec)
18584	return op.Output(0)
18585}
18586
18587// DenseBincountAttr is an optional argument to DenseBincount.
18588type DenseBincountAttr func(optionalAttr)
18589
18590// DenseBincountBinaryOutput sets the optional binary_output attribute to value.
18591//
18592// value: bool; whether the kernel should record only the presence of a value (binary output) or the number of occurrences.
18593// If not specified, defaults to false
18594func DenseBincountBinaryOutput(value bool) DenseBincountAttr {
18595	return func(m optionalAttr) {
18596		m["binary_output"] = value
18597	}
18598}
18599
18600// Counts the number of occurrences of each value in an integer array.
18601//
18602// Outputs a vector with length `size` and the same dtype as `weights`. If
18603// `weights` are empty, then index `i` stores the number of times the value `i` is
18604// counted in `input`. If `weights` are non-empty, then index `i` stores the sum of
18605// the value in `weights` at each index where the corresponding value in `input` is
18606// `i`.
18607//
18608// Values in `input` outside of the range [0, size) are ignored.
18609//
18610// Arguments:
18611//	input: 1D or 2D int `Tensor`.
18612//	size: non-negative int scalar `Tensor`.
18613//	weights: is an int32, int64, float32, or float64 `Tensor` with the same
18614// shape as `input`, or a length-0 `Tensor`, in which case it acts as all weights
18615// equal to 1.
18616//
18617// Returns 1D `Tensor` with length equal to `size` or 2D `Tensor` with [batch_size, `size`].
18618// The counts or summed weights for each value in the range [0, size).
18619func DenseBincount(scope *Scope, input tf.Output, size tf.Output, weights tf.Output, optional ...DenseBincountAttr) (output tf.Output) {
18620	if scope.Err() != nil {
18621		return
18622	}
18623	attrs := map[string]interface{}{}
18624	for _, a := range optional {
18625		a(attrs)
18626	}
18627	opspec := tf.OpSpec{
18628		Type: "DenseBincount",
18629		Input: []tf.Input{
18630			input, size, weights,
18631		},
18632		Attrs: attrs,
18633	}
18634	op := scope.AddOperation(opspec)
18635	return op.Output(0)
18636}
18637
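// A minimal client-side sketch: counting occurrences with unit weights, i.e.
// a length-0 `weights` tensor (plumbing omitted; assumes a Scope s from
// op.NewScope()):
//
// ```go
// in := op.Const(s, []int32{1, 1, 2, 5})
// counts := op.DenseBincount(s, in, op.Const(s, int32(6)), op.Const(s, []float32{}))
// // counts evaluates to [0, 2, 1, 0, 0, 1]
// ```
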
18638// Returns the complex conjugate of a complex number.
18639//
18640// Given a tensor `input` of complex numbers, this operation returns a tensor of
18641// complex numbers that are the complex conjugate of each element in `input`. The
18642// complex numbers in `input` must be of the form \\(a + bj\\), where *a* is the
18643// real part and *b* is the imaginary part.
18644//
18645// The complex conjugate returned by this operation is of the form \\(a - bj\\).
18646//
18647// For example:
18648//
18649// ```
18650// # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
18651// tf.conj(input) ==> [-2.25 - 4.75j, 3.25 - 5.75j]
18652// ```
18653func Conj(scope *Scope, input tf.Output) (output tf.Output) {
18654	if scope.Err() != nil {
18655		return
18656	}
18657	opspec := tf.OpSpec{
18658		Type: "Conj",
18659		Input: []tf.Input{
18660			input,
18661		},
18662	}
18663	op := scope.AddOperation(opspec)
18664	return op.Output(0)
18665}
18666
18667// ImagAttr is an optional argument to Imag.
18668type ImagAttr func(optionalAttr)
18669
18670// ImagTout sets the optional Tout attribute to value.
18671// If not specified, defaults to DT_FLOAT
18672func ImagTout(value tf.DataType) ImagAttr {
18673	return func(m optionalAttr) {
18674		m["Tout"] = value
18675	}
18676}
18677
18678// Returns the imaginary part of a complex number.
18679//
18680// Given a tensor `input` of complex numbers, this operation returns a tensor of
18681// type `float` that is the imaginary part of each element in `input`. All
18682// elements in `input` must be complex numbers of the form \\(a + bj\\), where *a*
18683// is the real part and *b* is the imaginary part returned by this operation.
18684//
18685// For example:
18686//
18687// ```
18688// # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
18689// tf.imag(input) ==> [4.75, 5.75]
18690// ```
18691func Imag(scope *Scope, input tf.Output, optional ...ImagAttr) (output tf.Output) {
18692	if scope.Err() != nil {
18693		return
18694	}
18695	attrs := map[string]interface{}{}
18696	for _, a := range optional {
18697		a(attrs)
18698	}
18699	opspec := tf.OpSpec{
18700		Type: "Imag",
18701		Input: []tf.Input{
18702			input,
18703		},
18704		Attrs: attrs,
18705	}
18706	op := scope.AddOperation(opspec)
18707	return op.Output(0)
18708}
18709
18710// RealAttr is an optional argument to Real.
18711type RealAttr func(optionalAttr)
18712
18713// RealTout sets the optional Tout attribute to value.
18714// If not specified, defaults to DT_FLOAT
18715func RealTout(value tf.DataType) RealAttr {
18716	return func(m optionalAttr) {
18717		m["Tout"] = value
18718	}
18719}
18720
18721// Returns the real part of a complex number.
18722//
18723// Given a tensor `input` of complex numbers, this operation returns a tensor of
18724// type `float` that is the real part of each element in `input`. All elements in
18725// `input` must be complex numbers of the form \\(a + bj\\), where *a* is the real
18726// part returned by this operation and *b* is the imaginary part.
18727//
18728// For example:
18729//
18730// ```
18731// # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
18732// tf.real(input) ==> [-2.25, 3.25]
18733// ```
18734func Real(scope *Scope, input tf.Output, optional ...RealAttr) (output tf.Output) {
18735	if scope.Err() != nil {
18736		return
18737	}
18738	attrs := map[string]interface{}{}
18739	for _, a := range optional {
18740		a(attrs)
18741	}
18742	opspec := tf.OpSpec{
18743		Type: "Real",
18744		Input: []tf.Input{
18745			input,
18746		},
18747		Attrs: attrs,
18748	}
18749	op := scope.AddOperation(opspec)
18750	return op.Output(0)
18751}
18752
18753// DequantizeAttr is an optional argument to Dequantize.
18754type DequantizeAttr func(optionalAttr)
18755
18756// DequantizeMode sets the optional mode attribute to value.
18757// If not specified, defaults to "MIN_COMBINED"
18758func DequantizeMode(value string) DequantizeAttr {
18759	return func(m optionalAttr) {
18760		m["mode"] = value
18761	}
18762}
18763
18764// DequantizeNarrowRange sets the optional narrow_range attribute to value.
18765// If not specified, defaults to false
18766func DequantizeNarrowRange(value bool) DequantizeAttr {
18767	return func(m optionalAttr) {
18768		m["narrow_range"] = value
18769	}
18770}
18771
18772// DequantizeAxis sets the optional axis attribute to value.
18773// If not specified, defaults to -1
18774func DequantizeAxis(value int64) DequantizeAttr {
18775	return func(m optionalAttr) {
18776		m["axis"] = value
18777	}
18778}
18779
18780// DequantizeDtype sets the optional dtype attribute to value.
18781//
18782// value: Type of the output tensor. Currently Dequantize supports float and bfloat16.
18783// If 'dtype' is 'bfloat16', it only supports 'MIN_COMBINED' mode.
18784// If not specified, defaults to DT_FLOAT
18785func DequantizeDtype(value tf.DataType) DequantizeAttr {
18786	return func(m optionalAttr) {
18787		m["dtype"] = value
18788	}
18789}
18790
18791// Dequantize the 'input' tensor into a float or bfloat16 Tensor.
18792//
18793// [min_range, max_range] are scalar floats that specify the range for
18794// the output. The 'mode' attribute controls exactly which calculations are
18795// used to convert the float values to their quantized equivalents.
18796//
18797// In 'MIN_COMBINED' mode, each value of the tensor will undergo the following:
18798//
18799// ```
18800// if T == qint8: in[i] += (range(T) + 1) / 2.0
18801// out[i] = min_range + (in[i]* (max_range - min_range) / range(T))
18802// ```
18803// here `range(T) = numeric_limits<T>::max() - numeric_limits<T>::min()`
18804//
18805// *MIN_COMBINED Mode Example*
18806//
18807// If the input comes from a QuantizedRelu6, the output type is
18808// quint8 (range of 0-255) but the possible range of QuantizedRelu6 is
18809// 0-6.  The min_range and max_range values are therefore 0.0 and 6.0.
18810// Dequantize on quint8 will take each value, cast to float, and multiply
18811// by 6 / 255.
18812// Note that if the quantized type is qint8, the operation will additionally add
18813// 128 to each value prior to casting.
18814//
18815// If the mode is 'MIN_FIRST', then this approach is used:
18816//
18817// ```c++
18818// num_discrete_values = 1 << (# of bits in T)
18819// range_adjust = num_discrete_values / (num_discrete_values - 1)
18820// range = (range_max - range_min) * range_adjust
18821// range_scale = range / num_discrete_values
18823// result = range_min + ((input - numeric_limits<T>::min()) * range_scale)
18824// ```
18825//
18826// If the mode is `SCALED`, dequantization is performed by multiplying each
18827// input value by a scaling_factor. (Thus an input of 0 always maps to 0.0).
18828//
18829// The scaling_factor is determined from `min_range`, `max_range`, and
18830// `narrow_range` in a way that is compatible with `QuantizeAndDequantize{V2|V3}`
18831// and `QuantizeV2`, using the following algorithm:
18832//
18833// ```c++
18834//
18835//   const int min_expected_T = std::numeric_limits<T>::min() +
18836//     (narrow_range ? 1 : 0);
18837//   const int max_expected_T = std::numeric_limits<T>::max();
18839//
18840//   const float scale_factor =
18841//     (std::numeric_limits<T>::min() == 0) ? (max_range / max_expected_T)
18842//                                          : std::max(min_range / min_expected_T,
18843//                                                     max_range / max_expected_T);
18844// ```
18845//
18846// Arguments:
18847//
18848//	min_range: The minimum scalar value possibly produced for the input.
18849//	max_range: The maximum scalar value possibly produced for the input.
18850func Dequantize(scope *Scope, input tf.Output, min_range tf.Output, max_range tf.Output, optional ...DequantizeAttr) (output tf.Output) {
18851	if scope.Err() != nil {
18852		return
18853	}
18854	attrs := map[string]interface{}{}
18855	for _, a := range optional {
18856		a(attrs)
18857	}
18858	opspec := tf.OpSpec{
18859		Type: "Dequantize",
18860		Input: []tf.Input{
18861			input, min_range, max_range,
18862		},
18863		Attrs: attrs,
18864	}
18865	op := scope.AddOperation(opspec)
18866	return op.Output(0)
18867}
18868
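// A minimal client-side sketch using SCALED mode (plumbing omitted; assumes a
// Scope s from op.NewScope(), and `quantized` is a hypothetical qint8 tensor):
//
// ```go
// out := op.Dequantize(s, quantized,
// 	op.Const(s, float32(-1)), op.Const(s, float32(1)),
// 	op.DequantizeMode("SCALED"))
// ```
//
// With min_range = -1 and max_range = 1 over qint8, the algorithm above gives
// scale_factor = max(-1/-128, 1/127) = 1/127, so a stored value of 127
// dequantizes to 1.0.
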
18869// ComplexAttr is an optional argument to Complex.
18870type ComplexAttr func(optionalAttr)
18871
18872// ComplexTout sets the optional Tout attribute to value.
18873// If not specified, defaults to DT_COMPLEX64
18874func ComplexTout(value tf.DataType) ComplexAttr {
18875	return func(m optionalAttr) {
18876		m["Tout"] = value
18877	}
18878}
18879
18880// Converts two real numbers to a complex number.
18881//
18882// Given a tensor `real` representing the real part of a complex number, and a
18883// tensor `imag` representing the imaginary part of a complex number, this
18884// operation returns complex numbers elementwise of the form \\(a + bj\\), where
18885// *a* represents the `real` part and *b* represents the `imag` part.
18886//
18887// The input tensors `real` and `imag` must have the same shape.
18888//
18889// For example:
18890//
18891// ```
18892// # tensor 'real' is [2.25, 3.25]
18893// # tensor `imag` is [4.75, 5.75]
18894// tf.complex(real, imag) ==> [2.25 + 4.75j, 3.25 + 5.75j]
18895// ```
18896func Complex(scope *Scope, real tf.Output, imag tf.Output, optional ...ComplexAttr) (out tf.Output) {
18897	if scope.Err() != nil {
18898		return
18899	}
18900	attrs := map[string]interface{}{}
18901	for _, a := range optional {
18902		a(attrs)
18903	}
18904	opspec := tf.OpSpec{
18905		Type: "Complex",
18906		Input: []tf.Input{
18907			real, imag,
18908		},
18909		Attrs: attrs,
18910	}
18911	op := scope.AddOperation(opspec)
18912	return op.Output(0)
18913}
18914
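// A minimal client-side sketch tying Complex, Real, and Imag together
// (plumbing omitted; assumes a Scope s from op.NewScope()):
//
// ```go
// re := op.Const(s, []float32{2.25, 3.25})
// im := op.Const(s, []float32{4.75, 5.75})
// z := op.Complex(s, re, im) // [2.25+4.75j, 3.25+5.75j]
// r := op.Real(s, z)         // [2.25, 3.25]
// i := op.Imag(s, z)         // [4.75, 5.75]
// ```
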
18915// CudnnRNNCanonicalToParamsV2Attr is an optional argument to CudnnRNNCanonicalToParamsV2.
18916type CudnnRNNCanonicalToParamsV2Attr func(optionalAttr)
18917
18918// CudnnRNNCanonicalToParamsV2RnnMode sets the optional rnn_mode attribute to value.
18919// If not specified, defaults to "lstm"
18920func CudnnRNNCanonicalToParamsV2RnnMode(value string) CudnnRNNCanonicalToParamsV2Attr {
18921	return func(m optionalAttr) {
18922		m["rnn_mode"] = value
18923	}
18924}
18925
18926// CudnnRNNCanonicalToParamsV2InputMode sets the optional input_mode attribute to value.
18927// If not specified, defaults to "linear_input"
18928func CudnnRNNCanonicalToParamsV2InputMode(value string) CudnnRNNCanonicalToParamsV2Attr {
18929	return func(m optionalAttr) {
18930		m["input_mode"] = value
18931	}
18932}
18933
18934// CudnnRNNCanonicalToParamsV2Direction sets the optional direction attribute to value.
18935// If not specified, defaults to "unidirectional"
18936func CudnnRNNCanonicalToParamsV2Direction(value string) CudnnRNNCanonicalToParamsV2Attr {
18937	return func(m optionalAttr) {
18938		m["direction"] = value
18939	}
18940}
18941
18942// CudnnRNNCanonicalToParamsV2Dropout sets the optional dropout attribute to value.
18943// If not specified, defaults to 0
18944func CudnnRNNCanonicalToParamsV2Dropout(value float32) CudnnRNNCanonicalToParamsV2Attr {
18945	return func(m optionalAttr) {
18946		m["dropout"] = value
18947	}
18948}
18949
18950// CudnnRNNCanonicalToParamsV2Seed sets the optional seed attribute to value.
18951// If not specified, defaults to 0
18952func CudnnRNNCanonicalToParamsV2Seed(value int64) CudnnRNNCanonicalToParamsV2Attr {
18953	return func(m optionalAttr) {
18954		m["seed"] = value
18955	}
18956}
18957
18958// CudnnRNNCanonicalToParamsV2Seed2 sets the optional seed2 attribute to value.
18959// If not specified, defaults to 0
18960func CudnnRNNCanonicalToParamsV2Seed2(value int64) CudnnRNNCanonicalToParamsV2Attr {
18961	return func(m optionalAttr) {
18962		m["seed2"] = value
18963	}
18964}
18965
18966// CudnnRNNCanonicalToParamsV2NumProj sets the optional num_proj attribute to value.
18967// If not specified, defaults to 0
18968func CudnnRNNCanonicalToParamsV2NumProj(value int64) CudnnRNNCanonicalToParamsV2Attr {
18969	return func(m optionalAttr) {
18970		m["num_proj"] = value
18971	}
18972}
18973
18974// Converts CudnnRNN params from canonical form to usable form. It supports the projection in LSTM.
18975//
18976// Writes a set of weights into the opaque params buffer so they can be used in
18977// upcoming training or inferences.
18978//
18979// Note that the params buffer may not be compatible across different GPUs. So any
18980// save and restoration should be converted to and from the canonical weights and
18981// biases.
18982//
18983// num_layers: Specifies the number of layers in the RNN model.
18984// num_units: Specifies the size of the hidden state.
18985// input_size: Specifies the size of the input state.
18986// weights: the canonical form of weights that can be used for saving
18987//     and restoration. They are more likely to be compatible across different
18988//     generations.
18989// biases: the canonical form of biases that can be used for saving
18990//     and restoration. They are more likely to be compatible across different
18991//     generations.
// num_params_weights: number of weight parameter matrices for all layers.
// num_params_biases: number of bias parameter vectors for all layers.
// rnn_mode: Indicates the type of the RNN model.
// input_mode: Indicates whether there is a linear projection between the input and
//     the actual computation before the first layer. 'skip_input' is only allowed
//     when input_size == num_units; 'auto_select' implies 'skip_input' when
//     input_size == num_units; otherwise, it implies 'linear_input'.
// direction: Indicates whether a bidirectional model will be used.
//     dir = (direction == bidirectional) ? 2 : 1
// dropout: dropout probability. When set to 0., dropout is disabled.
// seed: the 1st part of a seed to initialize dropout.
// seed2: the 2nd part of a seed to initialize dropout.
// num_proj: The output dimensionality for the projection matrices. If None or 0,
//     no projection is performed.
func CudnnRNNCanonicalToParamsV2(scope *Scope, num_layers tf.Output, num_units tf.Output, input_size tf.Output, weights []tf.Output, biases []tf.Output, optional ...CudnnRNNCanonicalToParamsV2Attr) (params tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "CudnnRNNCanonicalToParamsV2",
		Input: []tf.Input{
			num_layers, num_units, input_size, tf.OutputList(weights), tf.OutputList(biases),
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Creates a sequence of numbers.
//
// This operation creates a sequence of numbers that begins at `start` and
// extends by increments of `delta` up to but not including `limit`.
//
// For example:
//
// ```
// # 'start' is 3
// # 'limit' is 18
// # 'delta' is 3
// tf.range(start, limit, delta) ==> [3, 6, 9, 12, 15]
// ```
//
// Arguments:
//	start: 0-D (scalar). First entry in the sequence.
//	limit: 0-D (scalar). Upper limit of sequence, exclusive.
//	delta: 0-D (scalar). Optional. Default is 1. Number that increments `start`.
//
// Returns 1-D.
func Range(scope *Scope, start tf.Output, limit tf.Output, delta tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Range",
		Input: []tf.Input{
			start, limit, delta,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
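
// exampleRange is a minimal usage sketch, not part of the generated API; it
// assumes the op package's Const helper and reproduces the doc example above:
// values from 3 up to, but not including, 18 in steps of 3.
func exampleRange(scope *Scope) tf.Output {
	start := Const(scope, int32(3))
	limit := Const(scope, int32(18))
	delta := Const(scope, int32(3))
	// => [3, 6, 9, 12, 15]
	return Range(scope, start, limit, delta)
}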

// AnyAttr is an optional argument to Any.
type AnyAttr func(optionalAttr)

// AnyKeepDims sets the optional keep_dims attribute to value.
//
// value: If true, retain reduced dimensions with length 1.
// If not specified, defaults to false
func AnyKeepDims(value bool) AnyAttr {
	return func(m optionalAttr) {
		m["keep_dims"] = value
	}
}

// Computes the "logical or" of elements across dimensions of a tensor.
//
// Reduces `input` along the dimensions given in `axis`. Unless
// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
// `axis`. If `keep_dims` is true, the reduced dimensions are
// retained with length 1.
//
// Arguments:
//	input: The tensor to reduce.
//	axis: The dimensions to reduce. Must be in the range
// `[-rank(input), rank(input))`.
//
// Returns The reduced tensor.
func Any(scope *Scope, input tf.Output, axis tf.Output, optional ...AnyAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "Any",
		Input: []tf.Input{
			input, axis,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
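
// exampleAny is a minimal usage sketch (not part of the generated API,
// assuming the op package's Const helper): a row-wise "logical or" over a
// boolean matrix. Passing AnyKeepDims(true) would instead keep the reduced
// axis with length 1.
func exampleAny(scope *Scope) tf.Output {
	input := Const(scope, [][]bool{{true, false}, {false, false}})
	axis := Const(scope, []int32{1}) // reduce across columns
	// => [true, false]
	return Any(scope, input, axis)
}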

// Computes the gradient of morphological 2-D dilation with respect to the input.
//
// Arguments:
//	input: 4-D with shape `[batch, in_height, in_width, depth]`.
//	filter: 3-D with shape `[filter_height, filter_width, depth]`.
//	out_backprop: 4-D with shape `[batch, out_height, out_width, depth]`.
//	strides: 1-D of length 4. The stride of the sliding window for each dimension of
// the input tensor. Must be: `[1, stride_height, stride_width, 1]`.
//	rates: 1-D of length 4. The input stride for atrous morphological dilation.
// Must be: `[1, rate_height, rate_width, 1]`.
//	padding: The type of padding algorithm to use.
//
// Returns 4-D with shape `[batch, in_height, in_width, depth]`.
func Dilation2DBackpropInput(scope *Scope, input tf.Output, filter tf.Output, out_backprop tf.Output, strides []int64, rates []int64, padding string) (in_backprop tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"strides": strides, "rates": rates, "padding": padding}
	opspec := tf.OpSpec{
		Type: "Dilation2DBackpropInput",
		Input: []tf.Input{
			input, filter, out_backprop,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// AllAttr is an optional argument to All.
type AllAttr func(optionalAttr)

// AllKeepDims sets the optional keep_dims attribute to value.
//
// value: If true, retain reduced dimensions with length 1.
// If not specified, defaults to false
func AllKeepDims(value bool) AllAttr {
	return func(m optionalAttr) {
		m["keep_dims"] = value
	}
}

// Computes the "logical and" of elements across dimensions of a tensor.
//
// Reduces `input` along the dimensions given in `axis`. Unless
// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
// `axis`. If `keep_dims` is true, the reduced dimensions are
// retained with length 1.
//
// Arguments:
//	input: The tensor to reduce.
//	axis: The dimensions to reduce. Must be in the range
// `[-rank(input), rank(input))`.
//
// Returns The reduced tensor.
func All(scope *Scope, input tf.Output, axis tf.Output, optional ...AllAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "All",
		Input: []tf.Input{
			input, axis,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Computes gradients for SparseSegmentSqrtN.
//
// Returns tensor "output" with same shape as grad, except for dimension 0 whose
// value is output_dim0.
//
// Arguments:
//	grad: gradient propagated to the SparseSegmentSqrtN op.
//	indices: indices passed to the corresponding SparseSegmentSqrtN op.
//	segment_ids: segment_ids passed to the corresponding SparseSegmentSqrtN op.
//	output_dim0: dimension 0 of "data" passed to SparseSegmentSqrtN op.
func SparseSegmentSqrtNGrad(scope *Scope, grad tf.Output, indices tf.Output, segment_ids tf.Output, output_dim0 tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "SparseSegmentSqrtNGrad",
		Input: []tf.Input{
			grad, indices, segment_ids, output_dim0,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Computes the mean along sparse segments of a tensor.
//
// See `tf.sparse.segment_sum` for usage examples.
//
// Like `SegmentMean`, but `segment_ids` can have rank less than `data`'s first
// dimension, selecting a subset of dimension 0, specified by `indices`.
//
// Arguments:
//
//	indices: A 1-D tensor. Has same rank as `segment_ids`.
//	segment_ids: A 1-D tensor. Values should be sorted and can be repeated.
//
// Returns Has same shape as data, except for dimension 0 which
// has size `k`, the number of segments.
func SparseSegmentMean(scope *Scope, data tf.Output, indices tf.Output, segment_ids tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "SparseSegmentMean",
		Input: []tf.Input{
			data, indices, segment_ids,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Computes the sum along sparse segments of a tensor.
//
// Like `SparseSegmentSum`, but allows missing ids in `segment_ids`. If an id is
// missing, the `output` tensor at that position will be zeroed.
//
// Read
// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/sparse#Segmentation)
// for an explanation of segments.
//
// For example:
//
// ```python
// c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])
//
// tf.sparse_segment_sum_with_num_segments(
//     c, tf.constant([0, 1]), tf.constant([0, 0]), num_segments=3)
// # => [[0 0 0 0]
// #     [0 0 0 0]
// #     [0 0 0 0]]
//
// tf.sparse_segment_sum_with_num_segments(c,
//                                         tf.constant([0, 1]),
//                                         tf.constant([0, 2]),
//                                         num_segments=4)
// # => [[ 1  2  3  4]
// #     [ 0  0  0  0]
// #     [-1 -2 -3 -4]
// #     [ 0  0  0  0]]
// ```
//
// Arguments:
//
//	indices: A 1-D tensor. Has same rank as `segment_ids`.
//	segment_ids: A 1-D tensor. Values should be sorted and can be repeated.
//	num_segments: Should equal the number of distinct segment IDs.
//
// Returns Has same shape as data, except for dimension 0 which
// has size `num_segments`.
func SparseSegmentSumWithNumSegments(scope *Scope, data tf.Output, indices tf.Output, segment_ids tf.Output, num_segments tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "SparseSegmentSumWithNumSegments",
		Input: []tf.Input{
			data, indices, segment_ids, num_segments,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
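
// exampleSparseSegmentSumWithNumSegments is a Go sketch of the first Python
// example above (not part of the generated API; assumes the op package's
// Const helper). Rows 0 and 1 of `c` are both summed into segment 0, where
// they cancel; segments 1 and 2 receive no rows and stay zero.
func exampleSparseSegmentSumWithNumSegments(scope *Scope) tf.Output {
	c := Const(scope, [][]int32{{1, 2, 3, 4}, {-1, -2, -3, -4}, {5, 6, 7, 8}})
	indices := Const(scope, []int32{0, 1})
	segmentIDs := Const(scope, []int32{0, 0})
	numSegments := Const(scope, int32(3))
	// => [[0 0 0 0] [0 0 0 0] [0 0 0 0]]
	return SparseSegmentSumWithNumSegments(scope, c, indices, segmentIDs, numSegments)
}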

// CollectiveReduceV2Attr is an optional argument to CollectiveReduceV2.
type CollectiveReduceV2Attr func(optionalAttr)

// CollectiveReduceV2CommunicationHint sets the optional communication_hint attribute to value.
// If not specified, defaults to "auto"
func CollectiveReduceV2CommunicationHint(value string) CollectiveReduceV2Attr {
	return func(m optionalAttr) {
		m["communication_hint"] = value
	}
}

// CollectiveReduceV2TimeoutSeconds sets the optional timeout_seconds attribute to value.
// If not specified, defaults to 0
func CollectiveReduceV2TimeoutSeconds(value float32) CollectiveReduceV2Attr {
	return func(m optionalAttr) {
		m["timeout_seconds"] = value
	}
}

// Mutually reduces multiple tensors of identical type and shape.
func CollectiveReduceV2(scope *Scope, input tf.Output, group_size tf.Output, group_key tf.Output, instance_key tf.Output, ordering_token []tf.Output, merge_op string, final_op string, optional ...CollectiveReduceV2Attr) (data tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"merge_op": merge_op, "final_op": final_op}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "CollectiveReduceV2",
		Input: []tf.Input{
			input, group_size, group_key, instance_key, tf.OutputList(ordering_token),
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Computes the sum along segments of a tensor.
//
// Read
// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)
// for an explanation of segments.
//
// Computes a tensor such that
// \\(output[i] = \sum_{j...} data[j...]\\) where the sum is over tuples `j...` such
// that `segment_ids[j...] == i`.  Unlike `SegmentSum`, `segment_ids`
// need not be sorted and need not cover all values in the full
// range of valid values.
//
// If the sum is empty for a given segment ID `i`, `output[i] = 0`.
// If the given segment ID `i` is negative, the value is dropped and will not be
// added to the sum of the segment.
//
// `num_segments` should equal the number of distinct segment IDs.
//
// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
// <img style="width:100%" src="https://www.tensorflow.org/images/UnsortedSegmentSum.png" alt>
// </div>
//
// ```python
// c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]])
// tf.unsorted_segment_sum(c, tf.constant([0, 1, 0]), num_segments=2)
// # ==> [[5, 5, 5, 5],
// #      [5, 6, 7, 8]]
// ```
//
// Arguments:
//
//	segment_ids: A tensor whose shape is a prefix of `data.shape`.
//
//
// Returns Has same shape as data, except for the first `segment_ids.rank`
// dimensions, which are replaced with a single dimension which has size
// `num_segments`.
func UnsortedSegmentSum(scope *Scope, data tf.Output, segment_ids tf.Output, num_segments tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "UnsortedSegmentSum",
		Input: []tf.Input{
			data, segment_ids, num_segments,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
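
// exampleUnsortedSegmentSum mirrors the Python example above in Go (a sketch,
// not part of the generated API; assumes the op package's Const helper). Rows
// 0 and 2 are summed into segment 0 and row 1 becomes segment 1.
func exampleUnsortedSegmentSum(scope *Scope) tf.Output {
	c := Const(scope, [][]int32{{1, 2, 3, 4}, {5, 6, 7, 8}, {4, 3, 2, 1}})
	segmentIDs := Const(scope, []int32{0, 1, 0})
	numSegments := Const(scope, int32(2))
	// => [[5, 5, 5, 5], [5, 6, 7, 8]]
	return UnsortedSegmentSum(scope, c, segmentIDs, numSegments)
}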

// Computes arctangent of `y/x` element-wise, respecting signs of the arguments.
//
// This is the angle \\( \theta \in [-\pi, \pi] \\) such that
// \\[ x = r \cos(\theta) \\]
// and
// \\[ y = r \sin(\theta) \\]
// where \\( r = \sqrt{x^2 + y^2} \\).
func Atan2(scope *Scope, y tf.Output, x tf.Output) (z tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Atan2",
		Input: []tf.Input{
			y, x,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
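
// exampleAtan2 is a minimal sketch (not part of the generated API; assumes
// the op package's Const helper) showing why the signs matter: with x = -1,
// plain Atan(y/x) cannot distinguish the second quadrant from the fourth,
// but Atan2 can.
func exampleAtan2(scope *Scope) tf.Output {
	y := Const(scope, []float32{1, -1})
	x := Const(scope, []float32{-1, -1})
	// => approximately [2.3562, -2.3562], i.e. [3π/4, -3π/4]
	return Atan2(scope, y, x)
}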

// Computes the product along segments of a tensor.
//
// Read
// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)
// for an explanation of segments.
//
// Computes a tensor such that
// \\(output_i = \prod_j data_j\\) where the product is over `j` such
// that `segment_ids[j] == i`.
//
// If the product is empty for a given segment ID `i`, `output[i] = 1`.
//
// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
// <img style="width:100%" src="https://www.tensorflow.org/images/SegmentProd.png" alt>
// </div>
//
// For example:
//
// ```
// c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]])
// tf.segment_prod(c, tf.constant([0, 0, 1]))
// # ==> [[4, 6, 6, 4],
// #      [5, 6, 7, 8]]
// ```
//
// Arguments:
//
//	segment_ids: A 1-D tensor whose size is equal to the size of `data`'s
// first dimension.  Values should be sorted and can be repeated.
//
// Returns Has same shape as data, except for dimension 0 which
// has size `k`, the number of segments.
func SegmentProd(scope *Scope, data tf.Output, segment_ids tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "SegmentProd",
		Input: []tf.Input{
			data, segment_ids,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
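
// exampleSegmentProd mirrors the doc example above in Go (a sketch, not part
// of the generated API; assumes the op package's Const helper): rows 0 and 1
// are multiplied elementwise into segment 0 and row 2 becomes segment 1.
func exampleSegmentProd(scope *Scope) tf.Output {
	c := Const(scope, [][]int32{{1, 2, 3, 4}, {4, 3, 2, 1}, {5, 6, 7, 8}})
	segmentIDs := Const(scope, []int32{0, 0, 1})
	// => [[4, 6, 6, 4], [5, 6, 7, 8]]
	return SegmentProd(scope, c, segmentIDs)
}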

// ArgMinAttr is an optional argument to ArgMin.
type ArgMinAttr func(optionalAttr)

// ArgMinOutputType sets the optional output_type attribute to value.
// If not specified, defaults to DT_INT64
func ArgMinOutputType(value tf.DataType) ArgMinAttr {
	return func(m optionalAttr) {
		m["output_type"] = value
	}
}

// Returns the index with the smallest value across dimensions of a tensor.
//
// Note that in case of ties the identity of the return value is not guaranteed.
//
// Usage:
//   ```python
//   import tensorflow as tf
//   a = [1, 10, 26.9, 2.8, 166.32, 62.3]
//   b = tf.math.argmin(input = a)
//   c = tf.keras.backend.eval(b)
//   # c = 0
//   # here a[0] = 1 which is the smallest element of a across axis 0
//   ```
//
// Arguments:
//
//	dimension: int32 or int64, must be in the range `[-rank(input), rank(input))`.
// Describes which dimension of the input Tensor to reduce across. For vectors,
// use dimension = 0.
func ArgMin(scope *Scope, input tf.Output, dimension tf.Output, optional ...ArgMinAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ArgMin",
		Input: []tf.Input{
			input, dimension,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
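
// exampleArgMin is a Go version of the usage note above (a sketch, not part
// of the generated API; assumes the op package's Const helper). It also shows
// the optional-attribute pattern: ArgMinOutputType requests int32 indices
// instead of the default DT_INT64.
func exampleArgMin(scope *Scope) tf.Output {
	a := Const(scope, []float32{1, 10, 26.9, 2.8, 166.32, 62.3})
	dim := Const(scope, int32(0))
	// => 0, since a[0] = 1 is the smallest element along axis 0.
	return ArgMin(scope, a, dim, ArgMinOutputType(tf.Int32))
}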

// Reshapes a tensor.
//
// Given `tensor`, this operation returns a tensor that has the same values
// as `tensor` with shape `shape`.
//
// If one component of 1-D tensor `shape` is the special value -1, the size of that
// dimension is computed so that the total size remains constant.  In particular, a
// `shape` of `[-1]` flattens into 1-D.  At most one component of `shape` may be
// unknown.
//
// The `shape` must be 1-D and the operation returns a tensor with shape
// `shape` filled with the values of `tensor`. In this case, the number of elements
// implied by `shape` must be the same as the number of elements in `tensor`.
//
// It is an error if `shape` is not 1-D.
//
// For example:
//
// ```
// # tensor 't' is [1, 2, 3, 4, 5, 6, 7, 8, 9]
// # tensor 't' has shape [9]
// reshape(t, [3, 3]) ==> [[1, 2, 3],
//                         [4, 5, 6],
//                         [7, 8, 9]]
//
// # tensor 't' is [[[1, 1], [2, 2]],
// #                [[3, 3], [4, 4]]]
// # tensor 't' has shape [2, 2, 2]
// reshape(t, [2, 4]) ==> [[1, 1, 2, 2],
//                         [3, 3, 4, 4]]
//
// # tensor 't' is [[[1, 1, 1],
// #                 [2, 2, 2]],
// #                [[3, 3, 3],
// #                 [4, 4, 4]],
// #                [[5, 5, 5],
// #                 [6, 6, 6]]]
// # tensor 't' has shape [3, 2, 3]
// # pass '[-1]' to flatten 't'
// reshape(t, [-1]) ==> [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6]
//
// # -1 can also be used to infer the shape
//
// # -1 is inferred to be 9:
// reshape(t, [2, -1]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3],
//                          [4, 4, 4, 5, 5, 5, 6, 6, 6]]
// # -1 is inferred to be 2:
// reshape(t, [-1, 9]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3],
//                          [4, 4, 4, 5, 5, 5, 6, 6, 6]]
// # -1 is inferred to be 3:
// reshape(t, [ 2, -1, 3]) ==> [[[1, 1, 1],
//                               [2, 2, 2],
//                               [3, 3, 3]],
//                              [[4, 4, 4],
//                               [5, 5, 5],
//                               [6, 6, 6]]]
//
// # tensor 't' is [7]
// # shape `[]` reshapes to a scalar
// reshape(t, []) ==> 7
// ```
//
// Arguments:
//
//	shape: Defines the shape of the output tensor.
func Reshape(scope *Scope, tensor tf.Output, shape tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Reshape",
		Input: []tf.Input{
			tensor, shape,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
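
// exampleReshape condenses the first doc example into Go (a sketch, not part
// of the generated API; assumes the op package's Const helper): a 9-element
// vector becomes a 3x3 matrix. A shape of [-1] would flatten it back.
func exampleReshape(scope *Scope) tf.Output {
	t := Const(scope, []int32{1, 2, 3, 4, 5, 6, 7, 8, 9})
	shape := Const(scope, []int32{3, 3})
	// => [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
	return Reshape(scope, t, shape)
}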

// SnapshotDatasetAttr is an optional argument to SnapshotDataset.
type SnapshotDatasetAttr func(optionalAttr)

// SnapshotDatasetCompression sets the optional compression attribute to value.
// If not specified, defaults to ""
func SnapshotDatasetCompression(value string) SnapshotDatasetAttr {
	return func(m optionalAttr) {
		m["compression"] = value
	}
}

// SnapshotDatasetReaderPathPrefix sets the optional reader_path_prefix attribute to value.
// If not specified, defaults to ""
func SnapshotDatasetReaderPathPrefix(value string) SnapshotDatasetAttr {
	return func(m optionalAttr) {
		m["reader_path_prefix"] = value
	}
}

// SnapshotDatasetWriterPathPrefix sets the optional writer_path_prefix attribute to value.
// If not specified, defaults to ""
func SnapshotDatasetWriterPathPrefix(value string) SnapshotDatasetAttr {
	return func(m optionalAttr) {
		m["writer_path_prefix"] = value
	}
}

// SnapshotDatasetShardSizeBytes sets the optional shard_size_bytes attribute to value.
// If not specified, defaults to 10737418240
func SnapshotDatasetShardSizeBytes(value int64) SnapshotDatasetAttr {
	return func(m optionalAttr) {
		m["shard_size_bytes"] = value
	}
}

// SnapshotDatasetPendingSnapshotExpirySeconds sets the optional pending_snapshot_expiry_seconds attribute to value.
// If not specified, defaults to 86400
func SnapshotDatasetPendingSnapshotExpirySeconds(value int64) SnapshotDatasetAttr {
	return func(m optionalAttr) {
		m["pending_snapshot_expiry_seconds"] = value
	}
}

// SnapshotDatasetNumReaderThreads sets the optional num_reader_threads attribute to value.
// If not specified, defaults to 1
func SnapshotDatasetNumReaderThreads(value int64) SnapshotDatasetAttr {
	return func(m optionalAttr) {
		m["num_reader_threads"] = value
	}
}

// SnapshotDatasetReaderBufferSize sets the optional reader_buffer_size attribute to value.
// If not specified, defaults to 1
func SnapshotDatasetReaderBufferSize(value int64) SnapshotDatasetAttr {
	return func(m optionalAttr) {
		m["reader_buffer_size"] = value
	}
}

// SnapshotDatasetNumWriterThreads sets the optional num_writer_threads attribute to value.
// If not specified, defaults to 1
func SnapshotDatasetNumWriterThreads(value int64) SnapshotDatasetAttr {
	return func(m optionalAttr) {
		m["num_writer_threads"] = value
	}
}

// SnapshotDatasetWriterBufferSize sets the optional writer_buffer_size attribute to value.
// If not specified, defaults to 1
func SnapshotDatasetWriterBufferSize(value int64) SnapshotDatasetAttr {
	return func(m optionalAttr) {
		m["writer_buffer_size"] = value
	}
}

// SnapshotDatasetShuffleOnRead sets the optional shuffle_on_read attribute to value.
// If not specified, defaults to false
func SnapshotDatasetShuffleOnRead(value bool) SnapshotDatasetAttr {
	return func(m optionalAttr) {
		m["shuffle_on_read"] = value
	}
}

// SnapshotDatasetSeed sets the optional seed attribute to value.
// If not specified, defaults to 0
func SnapshotDatasetSeed(value int64) SnapshotDatasetAttr {
	return func(m optionalAttr) {
		m["seed"] = value
	}
}

// SnapshotDatasetSeed2 sets the optional seed2 attribute to value.
// If not specified, defaults to 0
func SnapshotDatasetSeed2(value int64) SnapshotDatasetAttr {
	return func(m optionalAttr) {
		m["seed2"] = value
	}
}

// SnapshotDatasetMode sets the optional mode attribute to value.
// If not specified, defaults to "auto"
func SnapshotDatasetMode(value string) SnapshotDatasetAttr {
	return func(m optionalAttr) {
		m["mode"] = value
	}
}

// SnapshotDatasetSnapshotName sets the optional snapshot_name attribute to value.
// If not specified, defaults to ""
func SnapshotDatasetSnapshotName(value string) SnapshotDatasetAttr {
	return func(m optionalAttr) {
		m["snapshot_name"] = value
	}
}

// Creates a dataset that will write to / read from a snapshot.
//
// This dataset attempts to determine whether a valid snapshot exists at the
// `snapshot_path`, and reads from the snapshot in lieu of using `input_dataset`.
// If not, it will run the preprocessing pipeline as usual, and write out a
// snapshot of the data processed for future use.
//
// Arguments:
//	input_dataset: A variant tensor representing the input dataset.
//	path: The path we should write snapshots to / read snapshots from.
//
//
func SnapshotDataset(scope *Scope, input_dataset tf.Output, path tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...SnapshotDatasetAttr) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "SnapshotDataset",
		Input: []tf.Input{
			input_dataset, path,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// ArgMaxAttr is an optional argument to ArgMax.
type ArgMaxAttr func(optionalAttr)

// ArgMaxOutputType sets the optional output_type attribute to value.
// If not specified, defaults to DT_INT64
func ArgMaxOutputType(value tf.DataType) ArgMaxAttr {
	return func(m optionalAttr) {
		m["output_type"] = value
	}
}

// Returns the index with the largest value across dimensions of a tensor.
//
// Note that in case of ties the identity of the return value is not guaranteed.
//
// Usage:
//   ```python
//   import tensorflow as tf
//   a = [1, 10, 26.9, 2.8, 166.32, 62.3]
//   b = tf.math.argmax(input = a)
//   c = tf.keras.backend.eval(b)
//   # c = 4
//   # here a[4] = 166.32 which is the largest element of a across axis 0
//   ```
//
// Arguments:
//
//	dimension: int32 or int64, must be in the range `[-rank(input), rank(input))`.
// Describes which dimension of the input Tensor to reduce across. For vectors,
// use dimension = 0.
func ArgMax(scope *Scope, input tf.Output, dimension tf.Output, optional ...ArgMaxAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ArgMax",
		Input: []tf.Input{
			input, dimension,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// ResizeBilinearGradAttr is an optional argument to ResizeBilinearGrad.
type ResizeBilinearGradAttr func(optionalAttr)

// ResizeBilinearGradAlignCorners sets the optional align_corners attribute to value.
//
// value: If true, the centers of the 4 corner pixels of the input and grad tensors are
// aligned. Defaults to false.
// If not specified, defaults to false
func ResizeBilinearGradAlignCorners(value bool) ResizeBilinearGradAttr {
	return func(m optionalAttr) {
		m["align_corners"] = value
	}
}

// ResizeBilinearGradHalfPixelCenters sets the optional half_pixel_centers attribute to value.
// If not specified, defaults to false
func ResizeBilinearGradHalfPixelCenters(value bool) ResizeBilinearGradAttr {
	return func(m optionalAttr) {
		m["half_pixel_centers"] = value
	}
}

// Computes the gradient of bilinear interpolation.
//
// Arguments:
//	grads: 4-D with shape `[batch, height, width, channels]`.
//	original_image: 4-D with shape `[batch, orig_height, orig_width, channels]`,
// The image tensor that was resized.
//
// Returns 4-D with shape `[batch, orig_height, orig_width, channels]`.
// Gradients with respect to the input image. Input image must have been
// float or double.
func ResizeBilinearGrad(scope *Scope, grads tf.Output, original_image tf.Output, optional ...ResizeBilinearGradAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ResizeBilinearGrad",
		Input: []tf.Input{
			grads, original_image,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// MaxAttr is an optional argument to Max.
type MaxAttr func(optionalAttr)

// MaxKeepDims sets the optional keep_dims attribute to value.
//
// value: If true, retain reduced dimensions with length 1.
// If not specified, defaults to false
func MaxKeepDims(value bool) MaxAttr {
	return func(m optionalAttr) {
		m["keep_dims"] = value
	}
}

// Computes the maximum of elements across dimensions of a tensor.
//
// Reduces `input` along the dimensions given in `axis`. Unless
// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
// `axis`. If `keep_dims` is true, the reduced dimensions are
// retained with length 1.
//
// Arguments:
//	input: The tensor to reduce.
//	axis: The dimensions to reduce. Must be in the range
// `[-rank(input), rank(input))`.
//
// Returns The reduced tensor.
func Max(scope *Scope, input tf.Output, axis tf.Output, optional ...MaxAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "Max",
		Input: []tf.Input{
			input, axis,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
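
// exampleMax is a sketch of the keep_dims behaviour described above (not part
// of the generated API; assumes the op package's Const helper): reducing a
// 2x2 matrix over axis 1 with MaxKeepDims(true) yields shape [2, 1] rather
// than [2].
func exampleMax(scope *Scope) tf.Output {
	input := Const(scope, [][]float32{{1, 2}, {3, 4}})
	axis := Const(scope, []int32{1})
	// => [[2], [4]]
	return Max(scope, input, axis, MaxKeepDims(true))
}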

// SampleDistortedBoundingBoxV2Attr is an optional argument to SampleDistortedBoundingBoxV2.
type SampleDistortedBoundingBoxV2Attr func(optionalAttr)

// SampleDistortedBoundingBoxV2Seed sets the optional seed attribute to value.
//
// value: If either `seed` or `seed2` are set to non-zero, the random number
// generator is seeded by the given `seed`.  Otherwise, it is seeded by a random
// seed.
// If not specified, defaults to 0
func SampleDistortedBoundingBoxV2Seed(value int64) SampleDistortedBoundingBoxV2Attr {
	return func(m optionalAttr) {
		m["seed"] = value
	}
}

// SampleDistortedBoundingBoxV2Seed2 sets the optional seed2 attribute to value.
//
// value: A second seed to avoid seed collision.
// If not specified, defaults to 0
func SampleDistortedBoundingBoxV2Seed2(value int64) SampleDistortedBoundingBoxV2Attr {
	return func(m optionalAttr) {
		m["seed2"] = value
	}
}

// SampleDistortedBoundingBoxV2AspectRatioRange sets the optional aspect_ratio_range attribute to value.
//
// value: The cropped area of the image must have an aspect ratio =
// width / height within this range.
// If not specified, defaults to [0.75, 1.33]
func SampleDistortedBoundingBoxV2AspectRatioRange(value []float32) SampleDistortedBoundingBoxV2Attr {
	return func(m optionalAttr) {
		m["aspect_ratio_range"] = value
	}
}

// SampleDistortedBoundingBoxV2AreaRange sets the optional area_range attribute to value.
//
// value: The cropped area of the image must contain a fraction of the
// supplied image within this range.
// If not specified, defaults to [0.05, 1]
func SampleDistortedBoundingBoxV2AreaRange(value []float32) SampleDistortedBoundingBoxV2Attr {
	return func(m optionalAttr) {
		m["area_range"] = value
	}
}

// SampleDistortedBoundingBoxV2MaxAttempts sets the optional max_attempts attribute to value.
//
// value: Number of attempts at generating a cropped region of the image
// of the specified constraints. After `max_attempts` failures, return the entire
// image.
// If not specified, defaults to 100
func SampleDistortedBoundingBoxV2MaxAttempts(value int64) SampleDistortedBoundingBoxV2Attr {
	return func(m optionalAttr) {
		m["max_attempts"] = value
	}
}

// SampleDistortedBoundingBoxV2UseImageIfNoBoundingBoxes sets the optional use_image_if_no_bounding_boxes attribute to value.
//
// value: Controls behavior if no bounding boxes supplied.
// If true, assume an implicit bounding box covering the whole input. If false,
// raise an error.
// If not specified, defaults to false
func SampleDistortedBoundingBoxV2UseImageIfNoBoundingBoxes(value bool) SampleDistortedBoundingBoxV2Attr {
	return func(m optionalAttr) {
		m["use_image_if_no_bounding_boxes"] = value
	}
}

// Generate a single randomly distorted bounding box for an image.
//
// Bounding box annotations are often supplied in addition to ground-truth labels
// in image recognition or object localization tasks. A common technique for
// training such a system is to randomly distort an image while preserving
// its content, i.e. *data augmentation*. This Op outputs a randomly distorted
// localization of an object, i.e. bounding box, given an `image_size`,
// `bounding_boxes` and a series of constraints.
//
// The output of this Op is a single bounding box that may be used to crop the
// original image. The output is returned as 3 tensors: `begin`, `size` and
// `bboxes`. The first 2 tensors can be fed directly into `tf.slice` to crop the
// image. The latter may be supplied to `tf.image.draw_bounding_boxes` to visualize
// what the bounding box looks like.
//
// Bounding boxes are supplied and returned as `[y_min, x_min, y_max, x_max]`. The
// bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and
// height of the underlying image.
//
// For example,
//
// ```python
//     # Generate a single distorted bounding box.
//     begin, size, bbox_for_draw = tf.image.sample_distorted_bounding_box(
//         tf.shape(image),
//         bounding_boxes=bounding_boxes)
//
//     # Draw the bounding box in an image summary.
//     image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0),
//                                                   bbox_for_draw)
//     tf.summary.image('images_with_box', image_with_box)
//
//     # Employ the bounding box to distort the image.
//     distorted_image = tf.slice(image, begin, size)
// ```
//
// Note that if no bounding box information is available, setting
// `use_image_if_no_bounding_boxes = true` will assume there is a single implicit
// bounding box covering the whole image. If `use_image_if_no_bounding_boxes` is
// false and no bounding boxes are supplied, an error is raised.
//
// Arguments:
//	image_size: 1-D, containing `[height, width, channels]`.
//	bounding_boxes: 3-D with shape `[batch, N, 4]` describing the N bounding boxes
// associated with the image.
//	min_object_covered: The cropped area of the image must contain at least this
// fraction of any bounding box supplied. The value of this parameter should be
// non-negative. In the case of 0, the cropped area does not need to overlap
// any of the bounding boxes supplied.
//
// Returns:
//	begin: 1-D, containing `[offset_height, offset_width, 0]`. Provide as input to
// `tf.slice`.
//	size: 1-D, containing `[target_height, target_width, -1]`. Provide as input to
// `tf.slice`.
//	bboxes: 3-D with shape `[1, 1, 4]` containing the distorted bounding box.
// Provide as input to `tf.image.draw_bounding_boxes`.
func SampleDistortedBoundingBoxV2(scope *Scope, image_size tf.Output, bounding_boxes tf.Output, min_object_covered tf.Output, optional ...SampleDistortedBoundingBoxV2Attr) (begin tf.Output, size tf.Output, bboxes tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "SampleDistortedBoundingBoxV2",
		Input: []tf.Input{
			image_size, bounding_boxes, min_object_covered,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}

// EigAttr is an optional argument to Eig.
type EigAttr func(optionalAttr)

// EigComputeV sets the optional compute_v attribute to value.
//
// value: If `True` then eigenvectors will be computed and returned in `v`.
// Otherwise, only the eigenvalues will be computed.
// If not specified, defaults to true
func EigComputeV(value bool) EigAttr {
	return func(m optionalAttr) {
		m["compute_v"] = value
	}
}

// Computes the eigen decomposition of one or more square matrices.
//
// Computes the eigenvalues and (optionally) right eigenvectors of each inner matrix in
// `input` such that `input[..., :, :] = v[..., :, :] * diag(e[..., :])`. The eigenvalues
// are sorted in non-decreasing order.
//
// ```python
// # a is a tensor.
// # e is a tensor of eigenvalues.
// # v is a tensor of eigenvectors.
// e, v = eig(a)
// e = eig(a, compute_v=False)
// ```
//
// Arguments:
//	input: `Tensor` input of shape `[N, N]`.
//
//
// Returns:
//	e: Eigenvalues. Shape is `[N]`.
//	v: Eigenvectors. Shape is `[N, N]`.
func Eig(scope *Scope, input tf.Output, Tout tf.DataType, optional ...EigAttr) (e tf.Output, v tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"Tout": Tout}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "Eig",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1)
}

// ProdAttr is an optional argument to Prod.
type ProdAttr func(optionalAttr)

// ProdKeepDims sets the optional keep_dims attribute to value.
//
// value: If true, retain reduced dimensions with length 1.
// If not specified, defaults to false
func ProdKeepDims(value bool) ProdAttr {
	return func(m optionalAttr) {
		m["keep_dims"] = value
	}
}

// Computes the product of elements across dimensions of a tensor.
//
// Reduces `input` along the dimensions given in `axis`. Unless
// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
// `axis`. If `keep_dims` is true, the reduced dimensions are
// retained with length 1.
//
// Arguments:
//	input: The tensor to reduce.
//	axis: The dimensions to reduce. Must be in the range
// `[-rank(input), rank(input))`.
//
// Returns The reduced tensor.
func Prod(scope *Scope, input tf.Output, axis tf.Output, optional ...ProdAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "Prod",
		Input: []tf.Input{
			input, axis,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// SumAttr is an optional argument to Sum.
type SumAttr func(optionalAttr)

// SumKeepDims sets the optional keep_dims attribute to value.
//
// value: If true, retain reduced dimensions with length 1.
// If not specified, defaults to false
func SumKeepDims(value bool) SumAttr {
	return func(m optionalAttr) {
		m["keep_dims"] = value
	}
}

// Computes the sum of elements across dimensions of a tensor.
//
// Reduces `input` along the dimensions given in `axis`. Unless
// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
// `axis`. If `keep_dims` is true, the reduced dimensions are
// retained with length 1.
//
// Arguments:
//	input: The tensor to reduce.
//	axis: The dimensions to reduce. Must be in the range
// `[-rank(input), rank(input))`.
//
// Returns The reduced tensor.
func Sum(scope *Scope, input tf.Output, axis tf.Output, optional ...SumAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "Sum",
		Input: []tf.Input{
			input, axis,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// ShapeNAttr is an optional argument to ShapeN.
type ShapeNAttr func(optionalAttr)

// ShapeNOutType sets the optional out_type attribute to value.
// If not specified, defaults to DT_INT32
func ShapeNOutType(value tf.DataType) ShapeNAttr {
	return func(m optionalAttr) {
		m["out_type"] = value
	}
}

// Returns shape of tensors.
//
// This operation returns N 1-D integer tensors representing the shape of each `input[i]`.
func ShapeN(scope *Scope, input []tf.Output, optional ...ShapeNAttr) (output []tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ShapeN",
		Input: []tf.Input{
			tf.OutputList(input),
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	if scope.Err() != nil {
		return
	}
	var idx int
	var err error
	if output, idx, err = makeOutputList(op, idx, "output"); err != nil {
		scope.UpdateErr("ShapeN", err)
		return
	}
	return output
}
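
// exampleShapeN is a sketch of an op with a list output (not part of the
// generated API; assumes the op package's Const helper): one call returns the
// shapes of several tensors, unpacked internally via makeOutputList.
func exampleShapeN(scope *Scope) []tf.Output {
	a := Const(scope, [][]float32{{1, 2, 3}, {4, 5, 6}}) // shape [2, 3]
	b := Const(scope, []float32{1, 2})                   // shape [2]
	// => two int32 tensors: [2, 3] and [2]
	return ShapeN(scope, []tf.Output{a, b})
}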

// Returns the TopK values in the array in sorted order.
//
// This is a combination of MakeUnique and TopKUnique. The returned top-K will
// have its lower bits replaced by iota, thus it will be close to the original
// value but not exactly the same. The running time is proportional to the product
// of K and the input size. NaNs are never returned. Subnormal numbers are flushed
// to zero.
func TopKWithUnique(scope *Scope, input tf.Output, k int64) (topk tf.Output, topk_indices tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"k": k}
	opspec := tf.OpSpec{
		Type: "TopKWithUnique",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1)
}

// ImageSummaryAttr is an optional argument to ImageSummary.
type ImageSummaryAttr func(optionalAttr)

// ImageSummaryMaxImages sets the optional max_images attribute to value.
//
// value: Max number of batch elements to generate images for.
// If not specified, defaults to 3
//
// REQUIRES: value >= 1
func ImageSummaryMaxImages(value int64) ImageSummaryAttr {
	return func(m optionalAttr) {
		m["max_images"] = value
	}
}

// ImageSummaryBadColor sets the optional bad_color attribute to value.
//
// value: Color to use for pixels with non-finite values.
// If not specified, defaults to a length-4 uint8 tensor `[255, 0, 0, 255]` (red).
func ImageSummaryBadColor(value tf.Tensor) ImageSummaryAttr {
	return func(m optionalAttr) {
		m["bad_color"] = value
	}
}

// Outputs a `Summary` protocol buffer with images.
//
// The summary has up to `max_images` summary values containing images. The
// images are built from `tensor` which must be 4-D with shape `[batch_size,
// height, width, channels]` and where `channels` can be:
//
// *  1: `tensor` is interpreted as Grayscale.
// *  3: `tensor` is interpreted as RGB.
// *  4: `tensor` is interpreted as RGBA.
//
// The images have the same number of channels as the input tensor. For float
// input, the values are normalized one image at a time to fit in the range
// `[0, 255]`.  `uint8` values are unchanged.  The op uses two different
// normalization algorithms:
//
// *  If the input values are all positive, they are rescaled so the largest one
//    is 255.
//
// *  If any input value is negative, the values are shifted so input value 0.0
//    is at 127.  They are then rescaled so that either the smallest value is 0,
//    or the largest one is 255.
//
// The `tag` argument is a scalar `Tensor` of type `string`.  It is used to
// build the `tag` of the summary values:
//
// *  If `max_images` is 1, the summary value tag is '*tag*/image'.
// *  If `max_images` is greater than 1, the summary value tags are
//    generated sequentially as '*tag*/image/0', '*tag*/image/1', etc.
//
// The `bad_color` argument is the color to use in the generated images for
// non-finite input values.  It is a `uint8` 1-D tensor of length `channels`.
// Each element must be in the range `[0, 255]` (It represents the value of a
// pixel in the output image).  Non-finite values in the input tensor are
// replaced by this tensor in the output image.  The default value is the color
// red.
//
// Arguments:
//	tag: Scalar. Used to build the `tag` attribute of the summary values.
//	tensor: 4-D of shape `[batch_size, height, width, channels]` where
// `channels` is 1, 3, or 4.
//
// Returns Scalar. Serialized `Summary` protocol buffer.
func ImageSummary(scope *Scope, tag tf.Output, tensor tf.Output, optional ...ImageSummaryAttr) (summary tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ImageSummary",
		Input: []tf.Input{
			tag, tensor,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// CollectiveBcastSendAttr is an optional argument to CollectiveBcastSend.
type CollectiveBcastSendAttr func(optionalAttr)

// CollectiveBcastSendCommunicationHint sets the optional communication_hint attribute to value.
// If not specified, defaults to "auto"
func CollectiveBcastSendCommunicationHint(value string) CollectiveBcastSendAttr {
	return func(m optionalAttr) {
		m["communication_hint"] = value
	}
}

// CollectiveBcastSendTimeoutSeconds sets the optional timeout_seconds attribute to value.
// If not specified, defaults to 0
func CollectiveBcastSendTimeoutSeconds(value float32) CollectiveBcastSendAttr {
	return func(m optionalAttr) {
		m["timeout_seconds"] = value
	}
}

// Broadcasts a tensor value to one or more other devices.
func CollectiveBcastSend(scope *Scope, input tf.Output, group_size int64, group_key int64, instance_key int64, shape tf.Shape, optional ...CollectiveBcastSendAttr) (data tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"group_size": group_size, "group_key": group_key, "instance_key": instance_key, "shape": shape}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "CollectiveBcastSend",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// CombinedNonMaxSuppressionAttr is an optional argument to CombinedNonMaxSuppression.
type CombinedNonMaxSuppressionAttr func(optionalAttr)

// CombinedNonMaxSuppressionPadPerClass sets the optional pad_per_class attribute to value.
//
// value: If false, the output nmsed boxes, scores and classes
// are padded/clipped to `max_total_size`. If true, the
// output nmsed boxes, scores and classes are padded to be of length
// `max_size_per_class`*`num_classes`, unless it exceeds `max_total_size` in
// which case it is clipped to `max_total_size`. Defaults to false.
// If not specified, defaults to false
func CombinedNonMaxSuppressionPadPerClass(value bool) CombinedNonMaxSuppressionAttr {
	return func(m optionalAttr) {
		m["pad_per_class"] = value
	}
}

// CombinedNonMaxSuppressionClipBoxes sets the optional clip_boxes attribute to value.
//
// value: If true, assume the box coordinates are between [0, 1] and clip the output boxes
// if they fall beyond [0, 1]. If false, do not do clipping and output the box
// coordinates as it is.
// If not specified, defaults to true
func CombinedNonMaxSuppressionClipBoxes(value bool) CombinedNonMaxSuppressionAttr {
	return func(m optionalAttr) {
		m["clip_boxes"] = value
	}
}

// Greedily selects a subset of bounding boxes in descending order of score.
//
// This operation performs non_max_suppression on the inputs per batch, across
// all classes.
// It prunes away boxes that have high intersection-over-union (IOU) overlap
// with previously selected boxes.  Bounding boxes are supplied as
// [y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any
// diagonal pair of box corners and the coordinates can be provided as normalized
// (i.e., lying in the interval [0, 1]) or absolute.  Note that this algorithm
// is agnostic to where the origin is in the coordinate system. Also note that
// this algorithm is invariant to orthogonal transformations and translations
// of the coordinate system; thus translations or reflections of the coordinate
// system result in the same boxes being selected by the algorithm.
20383// The output of this operation is the final boxes, scores and classes tensor
20384// returned after performing non_max_suppression.
20385//
20386// Arguments:
20387//	boxes: A 4-D float tensor of shape `[batch_size, num_boxes, q, 4]`. If `q` is 1 then
20388// same boxes are used for all classes otherwise, if `q` is equal to number of
20389// classes, class-specific boxes are used.
20390//	scores: A 3-D float tensor of shape `[batch_size, num_boxes, num_classes]`
20391// representing a single score corresponding to each box (each row of boxes).
20392//	max_output_size_per_class: A scalar integer tensor representing the maximum number of
20393// boxes to be selected by non max suppression per class
20394//	max_total_size: An int32 scalar representing the maximum number of boxes retained over all
20395// classes. Note that setting this value to a large number may result in OOM error
20396// depending on the system workload.
20397//	iou_threshold: A 0-D float tensor representing the threshold for deciding whether
20398// boxes overlap too much with respect to IOU.
20399//	score_threshold: A 0-D float tensor representing the threshold for deciding when to remove
20400// boxes based on score.
20401//
20402// Returns:
20403//	nmsed_boxes: A [batch_size, max_detections, 4] float32 tensor
20404// containing the non-max suppressed boxes.
20405//	nmsed_scores: A [batch_size, max_detections] float32 tensor
20406// containing the scores for the boxes.
20407//	nmsed_classes: A [batch_size, max_detections] float32 tensor
20408// containing the classes for the boxes.
20409//	valid_detections: A [batch_size] int32 tensor indicating the number of
20410// valid detections per batch item. Only the top num_detections[i] entries in
20411// nms_boxes[i], nms_scores[i] and nms_class[i] are valid. The rest of the
20412// entries are zero paddings.
20413func CombinedNonMaxSuppression(scope *Scope, boxes tf.Output, scores tf.Output, max_output_size_per_class tf.Output, max_total_size tf.Output, iou_threshold tf.Output, score_threshold tf.Output, optional ...CombinedNonMaxSuppressionAttr) (nmsed_boxes tf.Output, nmsed_scores tf.Output, nmsed_classes tf.Output, valid_detections tf.Output) {
20414	if scope.Err() != nil {
20415		return
20416	}
20417	attrs := map[string]interface{}{}
20418	for _, a := range optional {
20419		a(attrs)
20420	}
20421	opspec := tf.OpSpec{
20422		Type: "CombinedNonMaxSuppression",
20423		Input: []tf.Input{
20424			boxes, scores, max_output_size_per_class, max_total_size, iou_threshold, score_threshold,
20425		},
20426		Attrs: attrs,
20427	}
20428	op := scope.AddOperation(opspec)
20429	return op.Output(0), op.Output(1), op.Output(2), op.Output(3)
20430}
20431
20432// Returns the truth value of x AND y element-wise.
20433//
20434// *NOTE*: `LogicalAnd` supports broadcasting. More about broadcasting
20435// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
20436func LogicalAnd(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
20437	if scope.Err() != nil {
20438		return
20439	}
20440	opspec := tf.OpSpec{
20441		Type: "LogicalAnd",
20442		Input: []tf.Input{
20443			x, y,
20444		},
20445	}
20446	op := scope.AddOperation(opspec)
20447	return op.Output(0)
20448}
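
// To illustrate the broadcasting note above, a hedged sketch (values are
// illustrative; a scalar operand broadcasts against a vector):
//
// ```go
// s := op.NewScope()
// x := op.Const(s.SubScope("x"), []bool{true, false, true})
// y := op.Const(s.SubScope("y"), true) // scalar, broadcast against x
// z := op.LogicalAnd(s, x, y)          // evaluates to [true, false, true]
// ```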
20449
20450// Writes a graph summary.
20451//
20452// Writes TensorFlow graph `tensor` at `step` using summary `writer`.
20453//
20454// Returns the created operation.
20455func WriteGraphSummary(scope *Scope, writer tf.Output, step tf.Output, tensor tf.Output) (o *tf.Operation) {
20456	if scope.Err() != nil {
20457		return
20458	}
20459	opspec := tf.OpSpec{
20460		Type: "WriteGraphSummary",
20461		Input: []tf.Input{
20462			writer, step, tensor,
20463		},
20464	}
20465	return scope.AddOperation(opspec)
20466}
20467
20468// ApproximateEqualAttr is an optional argument to ApproximateEqual.
20469type ApproximateEqualAttr func(optionalAttr)
20470
20471// ApproximateEqualTolerance sets the optional tolerance attribute to value.
20472// If not specified, defaults to 1e-05
20473func ApproximateEqualTolerance(value float32) ApproximateEqualAttr {
20474	return func(m optionalAttr) {
20475		m["tolerance"] = value
20476	}
20477}
20478
20479// Returns the truth value of abs(x-y) < tolerance element-wise.
20480func ApproximateEqual(scope *Scope, x tf.Output, y tf.Output, optional ...ApproximateEqualAttr) (z tf.Output) {
20481	if scope.Err() != nil {
20482		return
20483	}
20484	attrs := map[string]interface{}{}
20485	for _, a := range optional {
20486		a(attrs)
20487	}
20488	opspec := tf.OpSpec{
20489		Type: "ApproximateEqual",
20490		Input: []tf.Input{
20491			x, y,
20492		},
20493		Attrs: attrs,
20494	}
20495	op := scope.AddOperation(opspec)
20496	return op.Output(0)
20497}
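
// The `ApproximateEqualAttr` values follow this package's functional-option
// pattern: each one mutates the attribute map before the op is added. A hedged
// sketch with an explicit tolerance (input values are illustrative):
//
// ```go
// s := op.NewScope()
// x := op.Const(s.SubScope("x"), []float32{1.0, 2.0})
// y := op.Const(s.SubScope("y"), []float32{1.00001, 2.1})
// z := op.ApproximateEqual(s, x, y, op.ApproximateEqualTolerance(1e-3))
// // |1.0-1.00001| < 1e-3 but |2.0-2.1| >= 1e-3, so z evaluates to [true, false].
// ```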
20498
// Compute the polygamma function \\(\psi^{(a)}(x)\\).
//
// The polygamma function is defined as:
//
// \\(\psi^{(a)}(x) = \frac{d^a}{dx^a} \psi(x)\\)
//
// where \\(\psi(x)\\) is the digamma function.
// The polygamma function is defined only for non-negative integer orders \\(a\\).
20508func Polygamma(scope *Scope, a tf.Output, x tf.Output) (z tf.Output) {
20509	if scope.Err() != nil {
20510		return
20511	}
20512	opspec := tf.OpSpec{
20513		Type: "Polygamma",
20514		Input: []tf.Input{
20515			a, x,
20516		},
20517	}
20518	op := scope.AddOperation(opspec)
20519	return op.Output(0)
20520}
20521
// Returns a tensor map with the item from the given key erased.
20523//
20524// input_handle: the original map
20525// output_handle: the map with value from given key removed
20526// key: the key of the value to be erased
20527func TensorMapErase(scope *Scope, input_handle tf.Output, key tf.Output, value_dtype tf.DataType) (output_handle tf.Output) {
20528	if scope.Err() != nil {
20529		return
20530	}
20531	attrs := map[string]interface{}{"value_dtype": value_dtype}
20532	opspec := tf.OpSpec{
20533		Type: "TensorMapErase",
20534		Input: []tf.Input{
20535			input_handle, key,
20536		},
20537		Attrs: attrs,
20538	}
20539	op := scope.AddOperation(opspec)
20540	return op.Output(0)
20541}
20542
20543// Shuffle dimensions of x according to a permutation.
20544//
20545// The output `y` has the same rank as `x`. The shapes of `x` and `y` satisfy:
20546//   `y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]`
20547func Transpose(scope *Scope, x tf.Output, perm tf.Output) (y tf.Output) {
20548	if scope.Err() != nil {
20549		return
20550	}
20551	opspec := tf.OpSpec{
20552		Type: "Transpose",
20553		Input: []tf.Input{
20554			x, perm,
20555		},
20556	}
20557	op := scope.AddOperation(opspec)
20558	return op.Output(0)
20559}
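
// A hedged sketch of the shape relation above (values are illustrative):
//
// ```go
// s := op.NewScope()
// x := op.Const(s.SubScope("x"), [][]int32{{1, 2, 3}, {4, 5, 6}}) // shape [2, 3]
// perm := op.Const(s.SubScope("perm"), []int32{1, 0})
// y := op.Transpose(s, x, perm) // shape [3, 2]: [[1, 4], [2, 5], [3, 6]]
// ```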
20560
20561// AssertAttr is an optional argument to Assert.
20562type AssertAttr func(optionalAttr)
20563
20564// AssertSummarize sets the optional summarize attribute to value.
20565//
20566// value: Print this many entries of each tensor.
20567// If not specified, defaults to 3
20568func AssertSummarize(value int64) AssertAttr {
20569	return func(m optionalAttr) {
20570		m["summarize"] = value
20571	}
20572}
20573
20574// Asserts that the given condition is true.
20575//
20576// If `condition` evaluates to false, print the list of tensors in `data`.
20577// `summarize` determines how many entries of the tensors to print.
20578//
20579// Arguments:
20580//	condition: The condition to evaluate.
20581//	data: The tensors to print out when condition is false.
20582//
20583// Returns the created operation.
20584func Assert(scope *Scope, condition tf.Output, data []tf.Output, optional ...AssertAttr) (o *tf.Operation) {
20585	if scope.Err() != nil {
20586		return
20587	}
20588	attrs := map[string]interface{}{}
20589	for _, a := range optional {
20590		a(attrs)
20591	}
20592	opspec := tf.OpSpec{
20593		Type: "Assert",
20594		Input: []tf.Input{
20595			condition, tf.OutputList(data),
20596		},
20597		Attrs: attrs,
20598	}
20599	return scope.AddOperation(opspec)
20600}
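
// Because Assert returns a `*tf.Operation` rather than a `tf.Output`, it is
// typically passed as a run target when executing the session. A hedged sketch
// (values are illustrative):
//
// ```go
// s := op.NewScope()
// cond := op.Const(s.SubScope("cond"), true)
// data := []tf.Output{op.Const(s.SubScope("data"), []float32{1, 2, 3, 4})}
// assert := op.Assert(s, cond, data, op.AssertSummarize(2))
// // After s.Finalize(), running the graph with `assert` among the session's
// // targets would fail (printing up to 2 entries of data) if cond were false.
// ```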
20601
20602// Computes the gradient of `igamma(a, x)` wrt `a`.
20603func IgammaGradA(scope *Scope, a tf.Output, x tf.Output) (z tf.Output) {
20604	if scope.Err() != nil {
20605		return
20606	}
20607	opspec := tf.OpSpec{
20608		Type: "IgammaGradA",
20609		Input: []tf.Input{
20610			a, x,
20611		},
20612	}
20613	op := scope.AddOperation(opspec)
20614	return op.Output(0)
20615}
20616
20617// Compute the upper regularized incomplete Gamma function `Q(a, x)`.
20618//
20619// The upper regularized incomplete Gamma function is defined as:
20620//
// \\(Q(a, x) = \Gamma(a, x) / \Gamma(a) = 1 - P(a, x)\\)
//
// where
//
// \\(\Gamma(a, x) = \int_{x}^{\infty} t^{a-1} \exp(-t) dt\\)
//
// is the upper incomplete Gamma function.
//
// Note that `P(a, x)` (`Igamma`) above is the lower regularized incomplete
// Gamma function.
20631func Igammac(scope *Scope, a tf.Output, x tf.Output) (z tf.Output) {
20632	if scope.Err() != nil {
20633		return
20634	}
20635	opspec := tf.OpSpec{
20636		Type: "Igammac",
20637		Input: []tf.Input{
20638			a, x,
20639		},
20640	}
20641	op := scope.AddOperation(opspec)
20642	return op.Output(0)
20643}
20644
20645// Returns element-wise remainder of division. This emulates C semantics in that
20646//
20647// the result here is consistent with a truncating divide. E.g.
20648// `tf.truncatediv(x, y) * y + truncate_mod(x, y) = x`.
20649//
20650// *NOTE*: `Mod` supports broadcasting. More about broadcasting
20651// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
20652func Mod(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
20653	if scope.Err() != nil {
20654		return
20655	}
20656	opspec := tf.OpSpec{
20657		Type: "Mod",
20658		Input: []tf.Input{
20659			x, y,
20660		},
20661	}
20662	op := scope.AddOperation(opspec)
20663	return op.Output(0)
20664}
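
// A hedged sketch of the truncation identity above, pairing Mod with
// `TruncateDiv` from this package (values are illustrative):
//
// ```go
// s := op.NewScope()
// x := op.Const(s.SubScope("x"), []int32{-7, 7})
// y := op.Const(s.SubScope("y"), []int32{5, 5})
// q := op.TruncateDiv(s, x, y) // [-1, 1]: quotients round toward zero
// r := op.Mod(s, x, y)         // [-2, 2]
// // q*y + r reconstructs x: (-1)*5 + (-2) = -7 and 1*5 + 2 = 7.
// ```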
20665
20666// A substitute for `InterleaveDataset` on a fixed list of `N` datasets.
20667//
20668// Arguments:
20669//	selector_input_dataset: A dataset of scalar `DT_INT64` elements that determines which of the
20670// `N` data inputs should produce the next output element.
20671//	data_input_datasets: `N` datasets with the same type that will be interleaved according to
20672// the values of `selector_input_dataset`.
20673//
20674//
20675func ExperimentalDirectedInterleaveDataset(scope *Scope, selector_input_dataset tf.Output, data_input_datasets []tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
20676	if scope.Err() != nil {
20677		return
20678	}
20679	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
20680	opspec := tf.OpSpec{
20681		Type: "ExperimentalDirectedInterleaveDataset",
20682		Input: []tf.Input{
20683			selector_input_dataset, tf.OutputList(data_input_datasets),
20684		},
20685		Attrs: attrs,
20686	}
20687	op := scope.AddOperation(opspec)
20688	return op.Output(0)
20689}
20690
20691// Returns the min of x and y (i.e. x < y ? x : y) element-wise.
20692//
20693// *NOTE*: `Minimum` supports broadcasting. More about broadcasting
20694// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
20695func Minimum(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
20696	if scope.Err() != nil {
20697		return
20698	}
20699	opspec := tf.OpSpec{
20700		Type: "Minimum",
20701		Input: []tf.Input{
20702			x, y,
20703		},
20704	}
20705	op := scope.AddOperation(opspec)
20706	return op.Output(0)
20707}
20708
20709// Returns the max of x and y (i.e. x > y ? x : y) element-wise.
20710//
20711// *NOTE*: `Maximum` supports broadcasting. More about broadcasting
20712// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
20713func Maximum(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
20714	if scope.Err() != nil {
20715		return
20716	}
20717	opspec := tf.OpSpec{
20718		Type: "Maximum",
20719		Input: []tf.Input{
20720			x, y,
20721		},
20722	}
20723	op := scope.AddOperation(opspec)
20724	return op.Output(0)
20725}
20726
20727// QuantizedResizeBilinearAttr is an optional argument to QuantizedResizeBilinear.
20728type QuantizedResizeBilinearAttr func(optionalAttr)
20729
20730// QuantizedResizeBilinearAlignCorners sets the optional align_corners attribute to value.
20731//
20732// value: If true, the centers of the 4 corner pixels of the input and output tensors are
// aligned, preserving the values at the corner pixels.
// If not specified, defaults to false
20735func QuantizedResizeBilinearAlignCorners(value bool) QuantizedResizeBilinearAttr {
20736	return func(m optionalAttr) {
20737		m["align_corners"] = value
20738	}
20739}
20740
20741// QuantizedResizeBilinearHalfPixelCenters sets the optional half_pixel_centers attribute to value.
20742// If not specified, defaults to false
20743func QuantizedResizeBilinearHalfPixelCenters(value bool) QuantizedResizeBilinearAttr {
20744	return func(m optionalAttr) {
20745		m["half_pixel_centers"] = value
20746	}
20747}
20748
20749// Resize quantized `images` to `size` using quantized bilinear interpolation.
20750//
20751// Input images and output images must be quantized types.
20752//
20753// Arguments:
20754//	images: 4-D with shape `[batch, height, width, channels]`.
//	size: A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The
20756// new size for the images.
20757//
20758//
20759//
20760// Returns:
20761//	resized_images: 4-D with shape
20762// `[batch, new_height, new_width, channels]`.
20763//	out_min
20764//	out_max
20765func QuantizedResizeBilinear(scope *Scope, images tf.Output, size tf.Output, min tf.Output, max tf.Output, optional ...QuantizedResizeBilinearAttr) (resized_images tf.Output, out_min tf.Output, out_max tf.Output) {
20766	if scope.Err() != nil {
20767		return
20768	}
20769	attrs := map[string]interface{}{}
20770	for _, a := range optional {
20771		a(attrs)
20772	}
20773	opspec := tf.OpSpec{
20774		Type: "QuantizedResizeBilinear",
20775		Input: []tf.Input{
20776			images, size, min, max,
20777		},
20778		Attrs: attrs,
20779	}
20780	op := scope.AddOperation(opspec)
20781	return op.Output(0), op.Output(1), op.Output(2)
20782}
20783
20784// RandomGammaAttr is an optional argument to RandomGamma.
20785type RandomGammaAttr func(optionalAttr)
20786
20787// RandomGammaSeed sets the optional seed attribute to value.
20788//
// value: If either `seed` or `seed2` is set to be non-zero, the random number
20790// generator is seeded by the given seed.  Otherwise, it is seeded by a
20791// random seed.
20792// If not specified, defaults to 0
20793func RandomGammaSeed(value int64) RandomGammaAttr {
20794	return func(m optionalAttr) {
20795		m["seed"] = value
20796	}
20797}
20798
20799// RandomGammaSeed2 sets the optional seed2 attribute to value.
20800//
20801// value: A second seed to avoid seed collision.
20802// If not specified, defaults to 0
20803func RandomGammaSeed2(value int64) RandomGammaAttr {
20804	return func(m optionalAttr) {
20805		m["seed2"] = value
20806	}
20807}
20808
20809// Outputs random values from the Gamma distribution(s) described by alpha.
20810//
20811// This op uses the algorithm by Marsaglia et al. to acquire samples via
20812// transformation-rejection from pairs of uniform and normal random variables.
20813// See http://dl.acm.org/citation.cfm?id=358414
20814//
20815// Arguments:
20816//	shape: 1-D integer tensor. Shape of independent samples to draw from each
20817// distribution described by the shape parameters given in alpha.
20818//	alpha: A tensor in which each scalar is a "shape" parameter describing the
20819// associated gamma distribution.
20820//
20821// Returns A tensor with shape `shape + shape(alpha)`. Each slice
20822// `[:, ..., :, i0, i1, ...iN]` contains the samples drawn for
20823// `alpha[i0, i1, ...iN]`. The dtype of the output matches the dtype of alpha.
20824func RandomGamma(scope *Scope, shape tf.Output, alpha tf.Output, optional ...RandomGammaAttr) (output tf.Output) {
20825	if scope.Err() != nil {
20826		return
20827	}
20828	attrs := map[string]interface{}{}
20829	for _, a := range optional {
20830		a(attrs)
20831	}
20832	opspec := tf.OpSpec{
20833		Type: "RandomGamma",
20834		Input: []tf.Input{
20835			shape, alpha,
20836		},
20837		Attrs: attrs,
20838	}
20839	op := scope.AddOperation(opspec)
20840	return op.Output(0)
20841}
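
// A hedged usage sketch with an explicit seed (shape and alpha values are
// illustrative):
//
// ```go
// s := op.NewScope()
// shape := op.Const(s.SubScope("shape"), []int32{10})
// alpha := op.Const(s.SubScope("alpha"), []float32{0.5, 1.5})
// samples := op.RandomGamma(s, shape, alpha, op.RandomGammaSeed(42))
// // samples has shape [10, 2]: ten draws for each of the two alpha values.
// ```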
20842
// Returns 0 if x == 0, and x * log1p(y) otherwise, element-wise.
20844func Xlog1py(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
20845	if scope.Err() != nil {
20846		return
20847	}
20848	opspec := tf.OpSpec{
20849		Type: "Xlog1py",
20850		Input: []tf.Input{
20851			x, y,
20852		},
20853	}
20854	op := scope.AddOperation(opspec)
20855	return op.Output(0)
20856}
20857
// Returns 0 if x == 0, and x * log(y) otherwise, element-wise.
20859func Xlogy(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
20860	if scope.Err() != nil {
20861		return
20862	}
20863	opspec := tf.OpSpec{
20864		Type: "Xlogy",
20865		Input: []tf.Input{
20866			x, y,
20867		},
20868	}
20869	op := scope.AddOperation(opspec)
20870	return op.Output(0)
20871}
20872
20873// Increments variable pointed to by 'resource' until it reaches 'limit'.
20874//
20875// Arguments:
20876//	resource: Should be from a scalar `Variable` node.
20877//	limit: If incrementing ref would bring it above limit, instead generates an
20878// 'OutOfRange' error.
20879//
20880//
20881// Returns A copy of the input before increment. If nothing else modifies the
20882// input, the values produced will all be distinct.
20883func ResourceCountUpTo(scope *Scope, resource tf.Output, limit int64, T tf.DataType) (output tf.Output) {
20884	if scope.Err() != nil {
20885		return
20886	}
20887	attrs := map[string]interface{}{"limit": limit, "T": T}
20888	opspec := tf.OpSpec{
20889		Type: "ResourceCountUpTo",
20890		Input: []tf.Input{
20891			resource,
20892		},
20893		Attrs: attrs,
20894	}
20895	op := scope.AddOperation(opspec)
20896	return op.Output(0)
20897}
20898
20899// StatefulStandardNormalAttr is an optional argument to StatefulStandardNormal.
20900type StatefulStandardNormalAttr func(optionalAttr)
20901
20902// StatefulStandardNormalDtype sets the optional dtype attribute to value.
20903//
20904// value: The type of the output.
20905// If not specified, defaults to DT_FLOAT
20906func StatefulStandardNormalDtype(value tf.DataType) StatefulStandardNormalAttr {
20907	return func(m optionalAttr) {
20908		m["dtype"] = value
20909	}
20910}
20911
20912// Outputs random values from a normal distribution. This op is deprecated in favor of op 'StatefulStandardNormalV2'
20913//
20914// DEPRECATED at GraphDef version 29: Use StatefulStandardNormalV2 instead
20915//
20916// The generated values will have mean 0 and standard deviation 1.
20917//
20918// Arguments:
20919//	resource: The handle of the resource variable that stores the state of the RNG.
20920//	shape: The shape of the output tensor.
20921//
20922// Returns A tensor of the specified shape filled with random normal values.
20923func StatefulStandardNormal(scope *Scope, resource tf.Output, shape tf.Output, optional ...StatefulStandardNormalAttr) (output tf.Output) {
20924	if scope.Err() != nil {
20925		return
20926	}
20927	attrs := map[string]interface{}{}
20928	for _, a := range optional {
20929		a(attrs)
20930	}
20931	opspec := tf.OpSpec{
20932		Type: "StatefulStandardNormal",
20933		Input: []tf.Input{
20934			resource, shape,
20935		},
20936		Attrs: attrs,
20937	}
20938	op := scope.AddOperation(opspec)
20939	return op.Output(0)
20940}
20941
20942// Returns x / y element-wise for real types.
20943//
20944// If `x` and `y` are reals, this will return the floating-point division.
20945//
20946// *NOTE*: `Div` supports broadcasting. More about broadcasting
20947// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
20948func RealDiv(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
20949	if scope.Err() != nil {
20950		return
20951	}
20952	opspec := tf.OpSpec{
20953		Type: "RealDiv",
20954		Input: []tf.Input{
20955			x, y,
20956		},
20957	}
20958	op := scope.AddOperation(opspec)
20959	return op.Output(0)
20960}
20961
20962// Returns x / y element-wise for integer types.
20963//
// Truncation designates that negative numbers will round fractional quantities
// toward zero. I.e. -7 / 5 = -1. This matches C semantics but is different
// from Python semantics. See `FloorDiv` for a division function that matches
// Python semantics.
20968//
20969// *NOTE*: `TruncateDiv` supports broadcasting. More about broadcasting
20970// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
20971func TruncateDiv(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
20972	if scope.Err() != nil {
20973		return
20974	}
20975	opspec := tf.OpSpec{
20976		Type: "TruncateDiv",
20977		Input: []tf.Input{
20978			x, y,
20979		},
20980	}
20981	op := scope.AddOperation(opspec)
20982	return op.Output(0)
20983}
20984
20985// Writes a serialized proto summary.
20986//
// Writes `tensor`, a serialized proto, at `step` using summary `writer`.
20988//
20989// Returns the created operation.
20990func WriteRawProtoSummary(scope *Scope, writer tf.Output, step tf.Output, tensor tf.Output) (o *tf.Operation) {
20991	if scope.Err() != nil {
20992		return
20993	}
20994	opspec := tf.OpSpec{
20995		Type: "WriteRawProtoSummary",
20996		Input: []tf.Input{
20997			writer, step, tensor,
20998		},
20999	}
21000	return scope.AddOperation(opspec)
21001}
21002
// Returns x / y element-wise, or 0 if the denominator is zero.
//
21006// *NOTE*: `DivNoNan` supports broadcasting. More about broadcasting
21007// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
21008func DivNoNan(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
21009	if scope.Err() != nil {
21010		return
21011	}
21012	opspec := tf.OpSpec{
21013		Type: "DivNoNan",
21014		Input: []tf.Input{
21015			x, y,
21016		},
21017	}
21018	op := scope.AddOperation(opspec)
21019	return op.Output(0)
21020}
21021
21022// Scatter `updates` into an existing tensor according to `indices`.
21023//
21024// This operation creates a new tensor by applying sparse `updates` to the passed
21025// in `tensor`.
21026// This operation is very similar to `tf.scatter_nd`, except that the updates are
21027// scattered onto an existing tensor (as opposed to a zero-tensor). If the memory
21028// for the existing tensor cannot be re-used, a copy is made and updated.
21029//
21030// If `indices` contains duplicates, then we pick the last update for the index.
21031//
21032// If an out of bound index is found on CPU, an error is returned.
21033//
21034// **WARNING**: There are some GPU specific semantics for this operation.
21035// - If an out of bound index is found, the index is ignored.
21036// - The order in which updates are applied is nondeterministic, so the output
21037// will be nondeterministic if `indices` contains duplicates.
21038//
21039// `indices` is an integer tensor containing indices into a new tensor of shape
21040// `shape`.
21041//
// * `indices` must have at least 2 axes: `(num_updates, index_depth)`.
// * The last axis of `indices` is how deep to index into `tensor`, so this index
//   depth must be at most the rank of `tensor`: `indices.shape[-1] <= tensor.ndim`
//
// If `indices.shape[-1] = tensor.rank` this Op indexes and updates scalar elements.
// If `indices.shape[-1] < tensor.rank` it indexes and updates slices of the input
// `tensor`.
21049//
21050// Each `update` has a rank of `tensor.rank - indices.shape[-1]`.
21051// The overall shape of `updates` is:
21052//
21053// ```
21054// indices.shape[:-1] + tensor.shape[indices.shape[-1]:]
21055// ```
21056//
// For usage examples see the Python [tf.tensor_scatter_nd_update](
// https://www.tensorflow.org/api_docs/python/tf/tensor_scatter_nd_update) function.
//
21061// Arguments:
21062//	tensor: Tensor to copy/update.
21063//	indices: Index tensor.
21064//	updates: Updates to scatter into output.
21065//
21066// Returns A new tensor with the given shape and updates applied according
21067// to the indices.
21068func TensorScatterUpdate(scope *Scope, tensor tf.Output, indices tf.Output, updates tf.Output) (output tf.Output) {
21069	if scope.Err() != nil {
21070		return
21071	}
21072	opspec := tf.OpSpec{
21073		Type: "TensorScatterUpdate",
21074		Input: []tf.Input{
21075			tensor, indices, updates,
21076		},
21077	}
21078	op := scope.AddOperation(opspec)
21079	return op.Output(0)
21080}
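
// A hedged rank-1 sketch of the shape rules above (values are illustrative).
// Here `indices.shape[-1] == tensor.rank`, so each update is a scalar:
//
// ```go
// s := op.NewScope()
// tensor := op.Const(s.SubScope("t"), []int32{0, 0, 0, 0})
// indices := op.Const(s.SubScope("i"), [][]int32{{1}, {3}}) // (num_updates=2, index_depth=1)
// updates := op.Const(s.SubScope("u"), []int32{5, 7})       // shape [2] = indices.shape[:-1]
// out := op.TensorScatterUpdate(s, tensor, indices, updates) // [0, 5, 0, 7]
// ```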
21081
21082// Creates a dataset that contains `count` elements from the `input_dataset`.
21083//
21084// Arguments:
21085//
21086//	count: A scalar representing the number of elements from the `input_dataset`
21087// that should be taken. A value of `-1` indicates that all of `input_dataset`
21088// is taken.
21089//
21090//
21091func TakeDataset(scope *Scope, input_dataset tf.Output, count tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
21092	if scope.Err() != nil {
21093		return
21094	}
21095	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
21096	opspec := tf.OpSpec{
21097		Type: "TakeDataset",
21098		Input: []tf.Input{
21099			input_dataset, count,
21100		},
21101		Attrs: attrs,
21102	}
21103	op := scope.AddOperation(opspec)
21104	return op.Output(0)
21105}
21106
21107// Returns the last element of the input list as well as a list with all but that element.
21108//
21109// Fails if the list is empty.
21110//
21111// input_handle: the input list
21112// tensor: the withdrawn last element of the list
21113// element_dtype: the type of elements in the list
21114// element_shape: the shape of the output tensor
21115func TensorListPopBack(scope *Scope, input_handle tf.Output, element_shape tf.Output, element_dtype tf.DataType) (output_handle tf.Output, tensor tf.Output) {
21116	if scope.Err() != nil {
21117		return
21118	}
21119	attrs := map[string]interface{}{"element_dtype": element_dtype}
21120	opspec := tf.OpSpec{
21121		Type: "TensorListPopBack",
21122		Input: []tf.Input{
21123			input_handle, element_shape,
21124		},
21125		Attrs: attrs,
21126	}
21127	op := scope.AddOperation(opspec)
21128	return op.Output(0), op.Output(1)
21129}
21130
21131// AsStringAttr is an optional argument to AsString.
21132type AsStringAttr func(optionalAttr)
21133
21134// AsStringPrecision sets the optional precision attribute to value.
21135//
21136// value: The post-decimal precision to use for floating point numbers.
21137// Only used if precision > -1.
21138// If not specified, defaults to -1
21139func AsStringPrecision(value int64) AsStringAttr {
21140	return func(m optionalAttr) {
21141		m["precision"] = value
21142	}
21143}
21144
21145// AsStringScientific sets the optional scientific attribute to value.
21146//
21147// value: Use scientific notation for floating point numbers.
21148// If not specified, defaults to false
21149func AsStringScientific(value bool) AsStringAttr {
21150	return func(m optionalAttr) {
21151		m["scientific"] = value
21152	}
21153}
21154
21155// AsStringShortest sets the optional shortest attribute to value.
21156//
21157// value: Use shortest representation (either scientific or standard) for
21158// floating point numbers.
21159// If not specified, defaults to false
21160func AsStringShortest(value bool) AsStringAttr {
21161	return func(m optionalAttr) {
21162		m["shortest"] = value
21163	}
21164}
21165
21166// AsStringWidth sets the optional width attribute to value.
21167//
21168// value: Pad pre-decimal numbers to this width.
21169// Applies to both floating point and integer numbers.
21170// Only used if width > -1.
21171// If not specified, defaults to -1
21172func AsStringWidth(value int64) AsStringAttr {
21173	return func(m optionalAttr) {
21174		m["width"] = value
21175	}
21176}
21177
21178// AsStringFill sets the optional fill attribute to value.
21179//
21180// value: The value to pad if width > -1.  If empty, pads with spaces.
// Another typical value is '0'. The fill string cannot be longer than 1 character.
21182// If not specified, defaults to ""
21183func AsStringFill(value string) AsStringAttr {
21184	return func(m optionalAttr) {
21185		m["fill"] = value
21186	}
21187}
21188
21189// Converts each entry in the given tensor to strings.
21190//
21191// Supports many numeric types and boolean.
21192//
// For Unicode, see the
// [Working with Unicode text](https://www.tensorflow.org/tutorials/representation/unicode)
// tutorial.
21196//
21197// Examples:
21198//
21199// >>> tf.strings.as_string([3, 2])
21200// <tf.Tensor: shape=(2,), dtype=string, numpy=array([b'3', b'2'], dtype=object)>
21201// >>> tf.strings.as_string([3.1415926, 2.71828], precision=2).numpy()
21202// array([b'3.14', b'2.72'], dtype=object)
21203func AsString(scope *Scope, input tf.Output, optional ...AsStringAttr) (output tf.Output) {
21204	if scope.Err() != nil {
21205		return
21206	}
21207	attrs := map[string]interface{}{}
21208	for _, a := range optional {
21209		a(attrs)
21210	}
21211	opspec := tf.OpSpec{
21212		Type: "AsString",
21213		Input: []tf.Input{
21214			input,
21215		},
21216		Attrs: attrs,
21217	}
21218	op := scope.AddOperation(opspec)
21219	return op.Output(0)
21220}
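
// A hedged Go equivalent of the second example above, using the precision
// attribute:
//
// ```go
// s := op.NewScope()
// x := op.Const(s, []float32{3.1415926, 2.71828})
// str := op.AsString(s, x, op.AsStringPrecision(2)) // ["3.14", "2.72"]
// ```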
21221
21222// Conv3DBackpropFilterV2Attr is an optional argument to Conv3DBackpropFilterV2.
21223type Conv3DBackpropFilterV2Attr func(optionalAttr)
21224
21225// Conv3DBackpropFilterV2DataFormat sets the optional data_format attribute to value.
21226//
21227// value: The data format of the input and output data. With the
21228// default format "NDHWC", the data is stored in the order of:
21229//     [batch, in_depth, in_height, in_width, in_channels].
21230// Alternatively, the format could be "NCDHW", the data storage order is:
21231//     [batch, in_channels, in_depth, in_height, in_width].
21232// If not specified, defaults to "NDHWC"
21233func Conv3DBackpropFilterV2DataFormat(value string) Conv3DBackpropFilterV2Attr {
21234	return func(m optionalAttr) {
21235		m["data_format"] = value
21236	}
21237}
21238
21239// Conv3DBackpropFilterV2Dilations sets the optional dilations attribute to value.
21240//
21241// value: 1-D tensor of length 5.  The dilation factor for each dimension of
21242// `input`. If set to k > 1, there will be k-1 skipped cells between each
21243// filter element on that dimension. The dimension order is determined by the
21244// value of `data_format`, see above for details. Dilations in the batch and
21245// depth dimensions must be 1.
// If not specified, defaults to [1, 1, 1, 1, 1]
21247func Conv3DBackpropFilterV2Dilations(value []int64) Conv3DBackpropFilterV2Attr {
21248	return func(m optionalAttr) {
21249		m["dilations"] = value
21250	}
21251}
21252
21253// Computes the gradients of 3-D convolution with respect to the filter.
21254//
21255// Arguments:
21256//	input: Shape `[batch, depth, rows, cols, in_channels]`.
21257//	filter_sizes: An integer vector representing the tensor shape of `filter`,
21258// where `filter` is a 5-D
21259// `[filter_depth, filter_height, filter_width, in_channels, out_channels]`
21260// tensor.
21261//	out_backprop: Backprop signal of shape `[batch, out_depth, out_rows, out_cols,
21262// out_channels]`.
21263//	strides: 1-D tensor of length 5. The stride of the sliding window for each
21264// dimension of `input`. Must have `strides[0] = strides[4] = 1`.
21265//	padding: The type of padding algorithm to use.
21266func Conv3DBackpropFilterV2(scope *Scope, input tf.Output, filter_sizes tf.Output, out_backprop tf.Output, strides []int64, padding string, optional ...Conv3DBackpropFilterV2Attr) (output tf.Output) {
21267	if scope.Err() != nil {
21268		return
21269	}
21270	attrs := map[string]interface{}{"strides": strides, "padding": padding}
21271	for _, a := range optional {
21272		a(attrs)
21273	}
21274	opspec := tf.OpSpec{
21275		Type: "Conv3DBackpropFilterV2",
21276		Input: []tf.Input{
21277			input, filter_sizes, out_backprop,
21278		},
21279		Attrs: attrs,
21280	}
21281	op := scope.AddOperation(opspec)
21282	return op.Output(0)
21283}
21284
21285// Returns x + y element-wise.
21286//
21287// *NOTE*: `Add` supports broadcasting. `AddN` does not. More about broadcasting
21288// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
21289func AddV2(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
21290	if scope.Err() != nil {
21291		return
21292	}
21293	opspec := tf.OpSpec{
21294		Type: "AddV2",
21295		Input: []tf.Input{
21296			x, y,
21297		},
21298	}
21299	op := scope.AddOperation(opspec)
21300	return op.Output(0)
21301}
21302
21303// UniformCandidateSamplerAttr is an optional argument to UniformCandidateSampler.
21304type UniformCandidateSamplerAttr func(optionalAttr)
21305
21306// UniformCandidateSamplerSeed sets the optional seed attribute to value.
21307//
// value: If either seed or seed2 is set to be non-zero, the random number
21309// generator is seeded by the given seed.  Otherwise, it is seeded by a
21310// random seed.
21311// If not specified, defaults to 0
21312func UniformCandidateSamplerSeed(value int64) UniformCandidateSamplerAttr {
21313	return func(m optionalAttr) {
21314		m["seed"] = value
21315	}
21316}
21317
21318// UniformCandidateSamplerSeed2 sets the optional seed2 attribute to value.
21319//
// value: A second seed to avoid seed collision.
21321// If not specified, defaults to 0
21322func UniformCandidateSamplerSeed2(value int64) UniformCandidateSamplerAttr {
21323	return func(m optionalAttr) {
21324		m["seed2"] = value
21325	}
21326}
21327
21328// Generates labels for candidate sampling with a uniform distribution.
21329//
21330// See explanations of candidate sampling and the data formats at
21331// go/candidate-sampling.
21332//
21333// For each batch, this op picks a single set of sampled candidate labels.
21334//
21335// The advantages of sampling candidates per-batch are simplicity and the
21336// possibility of efficient dense matrix multiplication. The disadvantage is that
21337// the sampled candidates must be chosen independently of the context and of the
21338// true labels.
21339//
21340// Arguments:
21341//	true_classes: A batch_size * num_true matrix, in which each row contains the
21342// IDs of the num_true target_classes in the corresponding original label.
21343//	num_true: Number of true labels per context.
21344//	num_sampled: Number of candidates to randomly sample.
21345//	unique: If unique is true, we sample with rejection, so that all sampled
21346// candidates in a batch are unique. This requires some approximation to
21347// estimate the post-rejection sampling probabilities.
21348//	range_max: The sampler will sample integers from the interval [0, range_max).
21349//
21350// Returns:
21351//	sampled_candidates: A vector of length num_sampled, in which each element is
21352// the ID of a sampled candidate.
21353//	true_expected_count: A batch_size * num_true matrix, representing
21354// the number of times each candidate is expected to occur in a batch
21355// of sampled candidates. If unique=true, then this is a probability.
21356//	sampled_expected_count: A vector of length num_sampled, for each sampled
21357// candidate representing the number of times the candidate is expected
21358// to occur in a batch of sampled candidates.  If unique=true, then this is a
21359// probability.
21360func UniformCandidateSampler(scope *Scope, true_classes tf.Output, num_true int64, num_sampled int64, unique bool, range_max int64, optional ...UniformCandidateSamplerAttr) (sampled_candidates tf.Output, true_expected_count tf.Output, sampled_expected_count tf.Output) {
21361	if scope.Err() != nil {
21362		return
21363	}
21364	attrs := map[string]interface{}{"num_true": num_true, "num_sampled": num_sampled, "unique": unique, "range_max": range_max}
21365	for _, a := range optional {
21366		a(attrs)
21367	}
21368	opspec := tf.OpSpec{
21369		Type: "UniformCandidateSampler",
21370		Input: []tf.Input{
21371			true_classes,
21372		},
21373		Attrs: attrs,
21374	}
21375	op := scope.AddOperation(opspec)
21376	return op.Output(0), op.Output(1), op.Output(2)
21377}
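
// A hedged usage sketch (batch size, class IDs, and range values are
// illustrative assumptions):
//
// ```go
// s := op.NewScope()
// trueClasses := op.Const(s, [][]int64{{12}, {37}}) // batch_size=2, num_true=1
// sampled, trueExpected, sampledExpected := op.UniformCandidateSampler(
// 	s, trueClasses, 1, 5, true, 50, op.UniformCandidateSamplerSeed(7))
// // sampled holds 5 unique candidate IDs drawn uniformly from [0, 50).
// ```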
21378
21379// ResourceGatherAttr is an optional argument to ResourceGather.
21380type ResourceGatherAttr func(optionalAttr)
21381
21382// ResourceGatherBatchDims sets the optional batch_dims attribute to value.
21383// If not specified, defaults to 0
21384func ResourceGatherBatchDims(value int64) ResourceGatherAttr {
21385	return func(m optionalAttr) {
21386		m["batch_dims"] = value
21387	}
21388}
21389
21390// ResourceGatherValidateIndices sets the optional validate_indices attribute to value.
21391// If not specified, defaults to true
21392func ResourceGatherValidateIndices(value bool) ResourceGatherAttr {
21393	return func(m optionalAttr) {
21394		m["validate_indices"] = value
21395	}
21396}
21397
21398// Gather slices from the variable pointed to by `resource` according to `indices`.
21399//
21400// `indices` must be an integer tensor of any dimension (usually 0-D or 1-D).
21401// Produces an output tensor with shape `indices.shape + params.shape[1:]` where:
21402//
21403// ```python
21404//     # Scalar indices
21405//     output[:, ..., :] = params[indices, :, ... :]
21406//
21407//     # Vector indices
21408//     output[i, :, ..., :] = params[indices[i], :, ... :]
21409//
21410//     # Higher rank indices
21411//     output[i, ..., j, :, ... :] = params[indices[i, ..., j], :, ..., :]
21412// ```
21413func ResourceGather(scope *Scope, resource tf.Output, indices tf.Output, dtype tf.DataType, optional ...ResourceGatherAttr) (output tf.Output) {
21414	if scope.Err() != nil {
21415		return
21416	}
21417	attrs := map[string]interface{}{"dtype": dtype}
21418	for _, a := range optional {
21419		a(attrs)
21420	}
21421	opspec := tf.OpSpec{
21422		Type: "ResourceGather",
21423		Input: []tf.Input{
21424			resource, indices,
21425		},
21426		Attrs: attrs,
21427	}
21428	op := scope.AddOperation(opspec)
21429	return op.Output(0)
21430}
21431
21432// Returns x + y element-wise.
21433//
21434// *NOTE*: `Add` supports broadcasting. `AddN` does not. More about broadcasting
21435// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
21436//
// Given two input tensors, the `tf.add` operation computes the element-wise sum.
//
// Both input and output have a range `(-inf, inf)`.
21441func Add(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
21442	if scope.Err() != nil {
21443		return
21444	}
21445	opspec := tf.OpSpec{
21446		Type: "Add",
21447		Input: []tf.Input{
21448			x, y,
21449		},
21450	}
21451	op := scope.AddOperation(opspec)
21452	return op.Output(0)
21453}
21454
21455// Returns element-wise smallest integer not less than x.
21456func Ceil(scope *Scope, x tf.Output) (y tf.Output) {
21457	if scope.Err() != nil {
21458		return
21459	}
21460	opspec := tf.OpSpec{
21461		Type: "Ceil",
21462		Input: []tf.Input{
21463			x,
21464		},
21465	}
21466	op := scope.AddOperation(opspec)
21467	return op.Output(0)
21468}
21469
21470// Returns element-wise largest integer not greater than x.
21471func Floor(scope *Scope, x tf.Output) (y tf.Output) {
21472	if scope.Err() != nil {
21473		return
21474	}
21475	opspec := tf.OpSpec{
21476		Type: "Floor",
21477		Input: []tf.Input{
21478			x,
21479		},
21480	}
21481	op := scope.AddOperation(opspec)
21482	return op.Output(0)
21483}
21484
// Computes the trigonometric inverse tangent of x element-wise.
//
// The `tf.math.atan` operation returns the inverse of `tf.math.tan`, such that
// if `y = tf.math.tan(x)`, then `x = tf.math.atan(y)`.
//
// **Note**: The output of `tf.math.atan` will lie within the invertible range
// of tan, i.e. (-pi/2, pi/2).
21492//
21493// For example:
21494//
21495// ```python
21496// # Note: [1.047, 0.785] ~= [(pi/3), (pi/4)]
21497// x = tf.constant([1.047, 0.785])
21498// y = tf.math.tan(x) # [1.731261, 0.99920404]
21499//
21500// tf.math.atan(y) # [1.047, 0.785] = x
21501// ```
21502//
21503func Atan(scope *Scope, x tf.Output) (y tf.Output) {
21504	if scope.Err() != nil {
21505		return
21506	}
21507	opspec := tf.OpSpec{
21508		Type: "Atan",
21509		Input: []tf.Input{
21510			x,
21511		},
21512	}
21513	op := scope.AddOperation(opspec)
21514	return op.Output(0)
21515}
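
// A hedged Go version of the Python example above (assumes the generated
// `Tan` wrapper elsewhere in this package):
//
// ```go
// s := op.NewScope()
// x := op.Const(s, []float32{1.047, 0.785}) // ~[pi/3, pi/4]
// y := op.Tan(s, x)                         // ~[1.7312, 0.9992]
// back := op.Atan(s, y)                     // ~[1.047, 0.785] == x
// ```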
21516
21517// Computes acos of x element-wise.
//
//   Provided an input tensor, the `tf.math.acos` operation returns the inverse cosine of each element of the tensor. If `y = tf.math.cos(x)`, then `x = tf.math.acos(y)`.
21521//
21522//   Input range is `[-1, 1]` and the output has a range of `[0, pi]`.
21523//
21524func Acos(scope *Scope, x tf.Output) (y tf.Output) {
21525	if scope.Err() != nil {
21526		return
21527	}
21528	opspec := tf.OpSpec{
21529		Type: "Acos",
21530		Input: []tf.Input{
21531			x,
21532		},
21533	}
21534	op := scope.AddOperation(opspec)
21535	return op.Output(0)
21536}
21537
21538// FusedBatchNormV2Attr is an optional argument to FusedBatchNormV2.
21539type FusedBatchNormV2Attr func(optionalAttr)
21540
21541// FusedBatchNormV2Epsilon sets the optional epsilon attribute to value.
21542//
21543// value: A small float number added to the variance of x.
21544// If not specified, defaults to 0.0001
21545func FusedBatchNormV2Epsilon(value float32) FusedBatchNormV2Attr {
21546	return func(m optionalAttr) {
21547		m["epsilon"] = value
21548	}
21549}
21550
21551// FusedBatchNormV2ExponentialAvgFactor sets the optional exponential_avg_factor attribute to value.
21552// If not specified, defaults to 1
21553func FusedBatchNormV2ExponentialAvgFactor(value float32) FusedBatchNormV2Attr {
21554	return func(m optionalAttr) {
21555		m["exponential_avg_factor"] = value
21556	}
21557}
21558
21559// FusedBatchNormV2DataFormat sets the optional data_format attribute to value.
21560//
21561// value: The data format for x and y. Either "NHWC" (default) or "NCHW".
21562// If not specified, defaults to "NHWC"
21563func FusedBatchNormV2DataFormat(value string) FusedBatchNormV2Attr {
21564	return func(m optionalAttr) {
21565		m["data_format"] = value
21566	}
21567}
21568
21569// FusedBatchNormV2IsTraining sets the optional is_training attribute to value.
21570//
21571// value: A bool value to indicate the operation is for training (default)
21572// or inference.
21573// If not specified, defaults to true
21574func FusedBatchNormV2IsTraining(value bool) FusedBatchNormV2Attr {
21575	return func(m optionalAttr) {
21576		m["is_training"] = value
21577	}
21578}
21579
21580// Batch normalization.
21581//
// Note that the size of 4D Tensors is defined by either "NHWC" or "NCHW".
21583// The size of 1D Tensors matches the dimension C of the 4D Tensors.
21584//
21585// Arguments:
21586//	x: A 4D Tensor for input data.
21587//	scale: A 1D Tensor for scaling factor, to scale the normalized x.
21588//	offset: A 1D Tensor for offset, to shift to the normalized x.
21589//	mean: A 1D Tensor for population mean. Used for inference only;
21590// must be empty for training.
21591//	variance: A 1D Tensor for population variance. Used for inference only;
21592// must be empty for training.
21593//
21594// Returns:
21595//	y: A 4D Tensor for output data.
21596//	batch_mean: A 1D Tensor for the computed batch mean, to be used by TensorFlow
21597// to compute the running mean.
21598//	batch_variance: A 1D Tensor for the computed batch variance, to be used by
21599// TensorFlow to compute the running variance.
21600//	reserve_space_1: A 1D Tensor for the computed batch mean, to be reused
21601// in the gradient computation.
21602//	reserve_space_2: A 1D Tensor for the computed batch variance (inverted variance
21603// in the cuDNN case), to be reused in the gradient computation.
21604func FusedBatchNormV2(scope *Scope, x tf.Output, scale tf.Output, offset tf.Output, mean tf.Output, variance tf.Output, optional ...FusedBatchNormV2Attr) (y tf.Output, batch_mean tf.Output, batch_variance tf.Output, reserve_space_1 tf.Output, reserve_space_2 tf.Output) {
21605	if scope.Err() != nil {
21606		return
21607	}
21608	attrs := map[string]interface{}{}
21609	for _, a := range optional {
21610		a(attrs)
21611	}
21612	opspec := tf.OpSpec{
21613		Type: "FusedBatchNormV2",
21614		Input: []tf.Input{
21615			x, scale, offset, mean, variance,
21616		},
21617		Attrs: attrs,
21618	}
21619	op := scope.AddOperation(opspec)
21620	return op.Output(0), op.Output(1), op.Output(2), op.Output(3), op.Output(4)
21621}
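
// A hedged inference-mode sketch (channel count and statistics are
// illustrative; for inference, population `mean` and `variance` are supplied
// and `is_training` is set to false):
//
// ```go
// s := op.NewScope()
// x := op.Placeholder(s, tf.Float) // NHWC input with C = 3
// scale := op.Const(s.SubScope("scale"), []float32{1, 1, 1})
// offset := op.Const(s.SubScope("offset"), []float32{0, 0, 0})
// mean := op.Const(s.SubScope("mean"), []float32{0.5, 0.5, 0.5})
// variance := op.Const(s.SubScope("variance"), []float32{1, 1, 1})
// y, _, _, _, _ := op.FusedBatchNormV2(s, x, scale, offset, mean, variance,
// 	op.FusedBatchNormV2IsTraining(false))
// ```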
21622
21623// Computes sine of x element-wise.
21624//
21625//   Given an input tensor, this function computes sine of every
21626//   element in the tensor. Input range is `(-inf, inf)` and
21627//   output range is `[-1,1]`.
21628//
21629//   ```python
21630//   x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 200, 10, float("inf")])
21631//   tf.math.sin(x) ==> [nan -0.4121185 -0.47942555 0.84147096 0.9320391 -0.87329733 -0.54402107 nan]
21632//   ```
21633func Sin(scope *Scope, x tf.Output) (y tf.Output) {
21634	if scope.Err() != nil {
21635		return
21636	}
21637	opspec := tf.OpSpec{
21638		Type: "Sin",
21639		Input: []tf.Input{
21640			x,
21641		},
21642	}
21643	op := scope.AddOperation(opspec)
21644	return op.Output(0)
21645}
21646
21647// Creates a Tensor by indexing into the TensorList.
21648//
21649// Each row in the produced Tensor corresponds to the element in the TensorList
21650// specified by the given index (see `tf.gather`).
21651//
21652// input_handle: The input tensor list.
21653// indices: The indices used to index into the list.
21654// values: The tensor.
21655func TensorListGather(scope *Scope, input_handle tf.Output, indices tf.Output, element_shape tf.Output, element_dtype tf.DataType) (values tf.Output) {
21656	if scope.Err() != nil {
21657		return
21658	}
21659	attrs := map[string]interface{}{"element_dtype": element_dtype}
21660	opspec := tf.OpSpec{
21661		Type: "TensorListGather",
21662		Input: []tf.Input{
21663			input_handle, indices, element_shape,
21664		},
21665		Attrs: attrs,
21666	}
21667	op := scope.AddOperation(opspec)
21668	return op.Output(0)
21669}
21670
21671// Computes the gradient of the sigmoid of `x` wrt its input.
21672//
21673// Specifically, `grad = dy * y * (1 - y)`, where `y = sigmoid(x)`, and
21674// `dy` is the corresponding input gradient.
21675func SigmoidGrad(scope *Scope, y tf.Output, dy tf.Output) (z tf.Output) {
21676	if scope.Err() != nil {
21677		return
21678	}
21679	opspec := tf.OpSpec{
21680		Type: "SigmoidGrad",
21681		Input: []tf.Input{
21682			y, dy,
21683		},
21684	}
21685	op := scope.AddOperation(opspec)
21686	return op.Output(0)
21687}
21688
21689// ResourceSparseApplyAdadeltaAttr is an optional argument to ResourceSparseApplyAdadelta.
21690type ResourceSparseApplyAdadeltaAttr func(optionalAttr)
21691
21692// ResourceSparseApplyAdadeltaUseLocking sets the optional use_locking attribute to value.
21693//
21694// value: If True, updating of the var and accum tensors will be protected by
21695// a lock; otherwise the behavior is undefined, but may exhibit less contention.
21696// If not specified, defaults to false
21697func ResourceSparseApplyAdadeltaUseLocking(value bool) ResourceSparseApplyAdadeltaAttr {
21698	return func(m optionalAttr) {
21699		m["use_locking"] = value
21700	}
21701}
21702
21703// var: Should be from a Variable().
21704//
21705// Arguments:
21706//
21707//	accum: Should be from a Variable().
//	accum_update: Should be from a Variable().
21709//	lr: Learning rate. Must be a scalar.
21710//	rho: Decay factor. Must be a scalar.
21711//	epsilon: Constant factor. Must be a scalar.
21712//	grad: The gradient.
21713//	indices: A vector of indices into the first dimension of var and accum.
21714//
21715// Returns the created operation.
21716func ResourceSparseApplyAdadelta(scope *Scope, var_ tf.Output, accum tf.Output, accum_update tf.Output, lr tf.Output, rho tf.Output, epsilon tf.Output, grad tf.Output, indices tf.Output, optional ...ResourceSparseApplyAdadeltaAttr) (o *tf.Operation) {
21717	if scope.Err() != nil {
21718		return
21719	}
21720	attrs := map[string]interface{}{}
21721	for _, a := range optional {
21722		a(attrs)
21723	}
21724	opspec := tf.OpSpec{
21725		Type: "ResourceSparseApplyAdadelta",
21726		Input: []tf.Input{
21727			var_, accum, accum_update, lr, rho, epsilon, grad, indices,
21728		},
21729		Attrs: attrs,
21730	}
21731	return scope.AddOperation(opspec)
21732}
21733
21734// RetrieveTPUEmbeddingADAMParametersGradAccumDebugAttr is an optional argument to RetrieveTPUEmbeddingADAMParametersGradAccumDebug.
21735type RetrieveTPUEmbeddingADAMParametersGradAccumDebugAttr func(optionalAttr)
21736
21737// RetrieveTPUEmbeddingADAMParametersGradAccumDebugTableId sets the optional table_id attribute to value.
21738// If not specified, defaults to -1
21739func RetrieveTPUEmbeddingADAMParametersGradAccumDebugTableId(value int64) RetrieveTPUEmbeddingADAMParametersGradAccumDebugAttr {
21740	return func(m optionalAttr) {
21741		m["table_id"] = value
21742	}
21743}
21744
21745// RetrieveTPUEmbeddingADAMParametersGradAccumDebugTableName sets the optional table_name attribute to value.
21746// If not specified, defaults to ""
21747func RetrieveTPUEmbeddingADAMParametersGradAccumDebugTableName(value string) RetrieveTPUEmbeddingADAMParametersGradAccumDebugAttr {
21748	return func(m optionalAttr) {
21749		m["table_name"] = value
21750	}
21751}
21752
21753// RetrieveTPUEmbeddingADAMParametersGradAccumDebugConfig sets the optional config attribute to value.
21754// If not specified, defaults to ""
21755func RetrieveTPUEmbeddingADAMParametersGradAccumDebugConfig(value string) RetrieveTPUEmbeddingADAMParametersGradAccumDebugAttr {
21756	return func(m optionalAttr) {
21757		m["config"] = value
21758	}
21759}
21760
21761// Retrieve ADAM embedding parameters with debug support.
21762//
21763// An op that retrieves optimization parameters from embedding to host
21764// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
21765// the correct embedding table configuration. For example, this op is
21766// used to retrieve updated parameters before saving a checkpoint.
21767//
21768// Returns:
21769//	parameters: Parameter parameters updated by the ADAM optimization algorithm.
21770//	momenta: Parameter momenta updated by the ADAM optimization algorithm.
21771//	velocities: Parameter velocities updated by the ADAM optimization algorithm.
21772//	gradient_accumulators: Parameter gradient_accumulators updated by the ADAM optimization algorithm.
21773func RetrieveTPUEmbeddingADAMParametersGradAccumDebug(scope *Scope, num_shards int64, shard_id int64, optional ...RetrieveTPUEmbeddingADAMParametersGradAccumDebugAttr) (parameters tf.Output, momenta tf.Output, velocities tf.Output, gradient_accumulators tf.Output) {
21774	if scope.Err() != nil {
21775		return
21776	}
21777	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
21778	for _, a := range optional {
21779		a(attrs)
21780	}
21781	opspec := tf.OpSpec{
21782		Type: "RetrieveTPUEmbeddingADAMParametersGradAccumDebug",
21783
21784		Attrs: attrs,
21785	}
21786	op := scope.AddOperation(opspec)
21787	return op.Output(0), op.Output(1), op.Output(2), op.Output(3)
21788}
21789
21790// ResourceApplyAdamAttr is an optional argument to ResourceApplyAdam.
21791type ResourceApplyAdamAttr func(optionalAttr)
21792
21793// ResourceApplyAdamUseLocking sets the optional use_locking attribute to value.
21794//
21795// value: If `True`, updating of the var, m, and v tensors will be protected
21796// by a lock; otherwise the behavior is undefined, but may exhibit less
21797// contention.
21798// If not specified, defaults to false
21799func ResourceApplyAdamUseLocking(value bool) ResourceApplyAdamAttr {
21800	return func(m optionalAttr) {
21801		m["use_locking"] = value
21802	}
21803}
21804
21805// ResourceApplyAdamUseNesterov sets the optional use_nesterov attribute to value.
21806//
21807// value: If `True`, uses the nesterov update.
21808// If not specified, defaults to false
21809func ResourceApplyAdamUseNesterov(value bool) ResourceApplyAdamAttr {
21810	return func(m optionalAttr) {
21811		m["use_nesterov"] = value
21812	}
21813}
21814
21815// Update '*var' according to the Adam algorithm.
21816//
// $$\text{lr}_t := \mathrm{learning\_rate} * \sqrt{1 - \beta_2^t} / (1 - \beta_1^t)$$
21818// $$m_t := \beta_1 * m_{t-1} + (1 - \beta_1) * g$$
21819// $$v_t := \beta_2 * v_{t-1} + (1 - \beta_2) * g * g$$
21820// $$\text{variable} := \text{variable} - \text{lr}_t * m_t / (\sqrt{v_t} + \epsilon)$$
21821//
21822// Arguments:
21823//	var_: Should be from a Variable().
21824//	m: Should be from a Variable().
21825//	v: Should be from a Variable().
21826//	beta1_power: Must be a scalar.
21827//	beta2_power: Must be a scalar.
21828//	lr: Scaling factor. Must be a scalar.
21829//	beta1: Momentum factor. Must be a scalar.
21830//	beta2: Momentum factor. Must be a scalar.
21831//	epsilon: Ridge term. Must be a scalar.
21832//	grad: The gradient.
21833//
21834// Returns the created operation.
21835func ResourceApplyAdam(scope *Scope, var_ tf.Output, m tf.Output, v tf.Output, beta1_power tf.Output, beta2_power tf.Output, lr tf.Output, beta1 tf.Output, beta2 tf.Output, epsilon tf.Output, grad tf.Output, optional ...ResourceApplyAdamAttr) (o *tf.Operation) {
21836	if scope.Err() != nil {
21837		return
21838	}
21839	attrs := map[string]interface{}{}
21840	for _, a := range optional {
21841		a(attrs)
21842	}
21843	opspec := tf.OpSpec{
21844		Type: "ResourceApplyAdam",
21845		Input: []tf.Input{
21846			var_, m, v, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad,
21847		},
21848		Attrs: attrs,
21849	}
21850	return scope.AddOperation(opspec)
21851}
21852
21853// Computes sigmoid of `x` element-wise.
21854//
21855// Specifically, `y = 1 / (1 + exp(-x))`.
21856func Sigmoid(scope *Scope, x tf.Output) (y tf.Output) {
21857	if scope.Err() != nil {
21858		return
21859	}
21860	opspec := tf.OpSpec{
21861		Type: "Sigmoid",
21862		Input: []tf.Input{
21863			x,
21864		},
21865	}
21866	op := scope.AddOperation(opspec)
21867	return op.Output(0)
21868}
21869
21870// Computes Psi, the derivative of Lgamma (the log of the absolute value of
21871//
21872// `Gamma(x)`), element-wise.
21873func Digamma(scope *Scope, x tf.Output) (y tf.Output) {
21874	if scope.Err() != nil {
21875		return
21876	}
21877	opspec := tf.OpSpec{
21878		Type: "Digamma",
21879		Input: []tf.Input{
21880			x,
21881		},
21882	}
21883	op := scope.AddOperation(opspec)
21884	return op.Output(0)
21885}
21886
21887// Computes the gradient for the tanh of `x` wrt its input.
21888//
21889// Specifically, `grad = dy * (1 - y*y)`, where `y = tanh(x)`, and `dy`
21890// is the corresponding input gradient.
21891func TanhGrad(scope *Scope, y tf.Output, dy tf.Output) (z tf.Output) {
21892	if scope.Err() != nil {
21893		return
21894	}
21895	opspec := tf.OpSpec{
21896		Type: "TanhGrad",
21897		Input: []tf.Input{
21898			y, dy,
21899		},
21900	}
21901	op := scope.AddOperation(opspec)
21902	return op.Output(0)
21903}
21904
21905// Computes hyperbolic tangent of `x` element-wise.
21906//
21907//   Given an input tensor, this function computes hyperbolic tangent of every
//   element in the tensor. Input range is `(-inf, inf)` and
//   output range is `[-1,1]`.
21910//
21911//   >>> x = tf.constant([-float("inf"), -5, -0.5, 1, 1.2, 2, 3, float("inf")])
21912//   >>> tf.math.tanh(x)
21913//   <tf.Tensor: shape=(8,), dtype=float32, numpy=
21914//   array([-1.        , -0.99990916, -0.46211717,  0.7615942 ,  0.8336547 ,
21915//           0.9640276 ,  0.9950547 ,  1.        ], dtype=float32)>
21916//
21917func Tanh(scope *Scope, x tf.Output) (y tf.Output) {
21918	if scope.Err() != nil {
21919		return
21920	}
21921	opspec := tf.OpSpec{
21922		Type: "Tanh",
21923		Input: []tf.Input{
21924			x,
21925		},
21926	}
21927	op := scope.AddOperation(opspec)
21928	return op.Output(0)
21929}
21930
21931// Computes hyperbolic sine of x element-wise.
21932//
21933//   Given an input tensor, this function computes hyperbolic sine of every
//   element in the tensor. Input range is `(-inf, inf)` and output range
//   is `(-inf, inf)`.
21936//
21937//   ```python
21938//   x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 2, 10, float("inf")])
21939//   tf.math.sinh(x) ==> [-inf -4.0515420e+03 -5.2109528e-01 1.1752012e+00 1.5094614e+00 3.6268604e+00 1.1013232e+04 inf]
21940//   ```
21941func Sinh(scope *Scope, x tf.Output) (y tf.Output) {
21942	if scope.Err() != nil {
21943		return
21944	}
21945	opspec := tf.OpSpec{
21946		Type: "Sinh",
21947		Input: []tf.Input{
21948			x,
21949		},
21950	}
21951	op := scope.AddOperation(opspec)
21952	return op.Output(0)
21953}
21954
21955// ResourceApplyProximalAdagradAttr is an optional argument to ResourceApplyProximalAdagrad.
21956type ResourceApplyProximalAdagradAttr func(optionalAttr)
21957
21958// ResourceApplyProximalAdagradUseLocking sets the optional use_locking attribute to value.
21959//
21960// value: If True, updating of the var and accum tensors will be protected by
21961// a lock; otherwise the behavior is undefined, but may exhibit less contention.
21962// If not specified, defaults to false
21963func ResourceApplyProximalAdagradUseLocking(value bool) ResourceApplyProximalAdagradAttr {
21964	return func(m optionalAttr) {
21965		m["use_locking"] = value
21966	}
21967}
21968
21969// Update '*var' and '*accum' according to FOBOS with Adagrad learning rate.
21970//
21971// accum += grad * grad
21972// prox_v = var - lr * grad * (1 / sqrt(accum))
21973// var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0}
21974//
21975// Arguments:
21976//	var_: Should be from a Variable().
21977//	accum: Should be from a Variable().
21978//	lr: Scaling factor. Must be a scalar.
21979//	l1: L1 regularization. Must be a scalar.
21980//	l2: L2 regularization. Must be a scalar.
21981//	grad: The gradient.
21982//
21983// Returns the created operation.
21984func ResourceApplyProximalAdagrad(scope *Scope, var_ tf.Output, accum tf.Output, lr tf.Output, l1 tf.Output, l2 tf.Output, grad tf.Output, optional ...ResourceApplyProximalAdagradAttr) (o *tf.Operation) {
21985	if scope.Err() != nil {
21986		return
21987	}
21988	attrs := map[string]interface{}{}
21989	for _, a := range optional {
21990		a(attrs)
21991	}
21992	opspec := tf.OpSpec{
21993		Type: "ResourceApplyProximalAdagrad",
21994		Input: []tf.Input{
21995			var_, accum, lr, l1, l2, grad,
21996		},
21997		Attrs: attrs,
21998	}
21999	return scope.AddOperation(opspec)
22000}
22001
22002// Divides sparse updates into the variable referenced by `resource`.
22003//
22004// This operation computes
22005//
22006//     # Scalar indices
22007//     ref[indices, ...] /= updates[...]
22008//
22009//     # Vector indices (for each i)
22010//     ref[indices[i], ...] /= updates[i, ...]
22011//
22012//     # High rank indices (for each i, ..., j)
22013//     ref[indices[i, ..., j], ...] /= updates[i, ..., j, ...]
22014//
22015// Duplicate entries are handled correctly: if multiple `indices` reference
22016// the same location, their contributions multiply.
22017//
22018// Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.
22019//
22020// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
22021// <img style="width:100%" src='https://www.tensorflow.org/images/ScatterAdd.png' alt>
22022// </div>
22023//
22024// Arguments:
22025//	resource: Should be from a `Variable` node.
22026//	indices: A tensor of indices into the first dimension of `ref`.
22027//	updates: A tensor of updated values to add to `ref`.
22028//
22029// Returns the created operation.
22030func ResourceScatterDiv(scope *Scope, resource tf.Output, indices tf.Output, updates tf.Output) (o *tf.Operation) {
22031	if scope.Err() != nil {
22032		return
22033	}
22034	opspec := tf.OpSpec{
22035		Type: "ResourceScatterDiv",
22036		Input: []tf.Input{
22037			resource, indices, updates,
22038		},
22039	}
22040	return scope.AddOperation(opspec)
22041}
22042
// Computes the trigonometric inverse sine of x element-wise.
//
// The `tf.math.asin` operation returns the inverse of `tf.math.sin`, such that
// if `y = tf.math.sin(x)`, then `x = tf.math.asin(y)`.
//
// **Note**: The output of `tf.math.asin` will lie within the invertible range
// of sine, i.e., [-pi/2, pi/2].
22050//
22051// For example:
22052//
22053// ```python
22054// # Note: [1.047, 0.785] ~= [(pi/3), (pi/4)]
22055// x = tf.constant([1.047, 0.785])
22056// y = tf.math.sin(x) # [0.8659266, 0.7068252]
22057//
22058// tf.math.asin(y) # [1.047, 0.785] = x
22059// ```
22060//
22061func Asin(scope *Scope, x tf.Output) (y tf.Output) {
22062	if scope.Err() != nil {
22063		return
22064	}
22065	opspec := tf.OpSpec{
22066		Type: "Asin",
22067		Input: []tf.Input{
22068			x,
22069		},
22070	}
22071	op := scope.AddOperation(opspec)
22072	return op.Output(0)
22073}
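
// A Go version of the round trip above (editorial sketch; Const and Sin are
// the wrappers generated elsewhere in this package):
func exampleAsinRoundTrip(s *Scope) tf.Output {
	// Note: [1.047, 0.785] ~= [pi/3, pi/4]
	x := Const(s, []float32{1.047, 0.785})
	y := Sin(s, x)    // ~[0.8659266, 0.7068252]
	return Asin(s, y) // recovers ~x, within [-pi/2, pi/2]
}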
22074
22075// Computes natural logarithm of (1 + x) element-wise.
22076//
22077// I.e., \\(y = \log_e (1 + x)\\).
22078//
22079// Example:
22080//
22081// ```python
22082// x = tf.constant([0, 0.5, 1, 5])
22083// tf.math.log1p(x) ==> [0., 0.4054651, 0.6931472, 1.7917595]
22084// ```
22085func Log1p(scope *Scope, x tf.Output) (y tf.Output) {
22086	if scope.Err() != nil {
22087		return
22088	}
22089	opspec := tf.OpSpec{
22090		Type: "Log1p",
22091		Input: []tf.Input{
22092			x,
22093		},
22094	}
22095	op := scope.AddOperation(opspec)
22096	return op.Output(0)
22097}
22098
// Computes the exponential of x element-wise.  \\(y = e^x\\).
//
//   This function computes the exponential of every element in the input
//   tensor, i.e. `exp(x)` or `e^x`, where `x` is the input tensor.
//   `e` denotes Euler's number and is approximately equal to 2.718281.
//   The output is positive for any real input.
22105//
22106//   ```python
22107//   x = tf.constant(2.0)
22108//   tf.math.exp(x) ==> 7.389056
22109//
22110//   x = tf.constant([2.0, 8.0])
22111//   tf.math.exp(x) ==> array([7.389056, 2980.958], dtype=float32)
22112//   ```
22113//
22114//   For complex numbers, the exponential value is calculated as follows:
22115//
22116//   ```
22117//   e^(x+iy) = e^x * e^iy = e^x * (cos y + i sin y)
22118//   ```
22119//
//   Consider the complex number 1+1j as an example:
22121//   e^1 * (cos 1 + i sin 1) = 2.7182818284590 * (0.54030230586+0.8414709848j)
22122//
22123//   ```python
22124//   x = tf.constant(1 + 1j)
22125//   tf.math.exp(x) ==> 1.4686939399158851+2.2873552871788423j
22126//   ```
22127func Exp(scope *Scope, x tf.Output) (y tf.Output) {
22128	if scope.Err() != nil {
22129		return
22130	}
22131	opspec := tf.OpSpec{
22132		Type: "Exp",
22133		Input: []tf.Input{
22134			x,
22135		},
22136	}
22137	op := scope.AddOperation(opspec)
22138	return op.Output(0)
22139}
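
// The second documented example expressed with this wrapper (editorial sketch):
func exampleExp(s *Scope) tf.Output {
	x := Const(s, []float32{2.0, 8.0})
	return Exp(s, x) // ==> ~[7.389056, 2980.958]
}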
22140
22141// Computes square of x element-wise.
22142//
22143// I.e., \\(y = x * x = x^2\\).
22144func Square(scope *Scope, x tf.Output) (y tf.Output) {
22145	if scope.Err() != nil {
22146		return
22147	}
22148	opspec := tf.OpSpec{
22149		Type: "Square",
22150		Input: []tf.Input{
22151			x,
22152		},
22153	}
22154	op := scope.AddOperation(opspec)
22155	return op.Output(0)
22156}
22157
22158// Computes the gradient for the inverse of `x` wrt its input.
22159//
22160// Specifically, `grad = -dy * y*y`, where `y = 1/x`, and `dy`
22161// is the corresponding input gradient.
22162func ReciprocalGrad(scope *Scope, y tf.Output, dy tf.Output) (z tf.Output) {
22163	if scope.Err() != nil {
22164		return
22165	}
22166	opspec := tf.OpSpec{
22167		Type: "ReciprocalGrad",
22168		Input: []tf.Input{
22169			y, dy,
22170		},
22171	}
22172	op := scope.AddOperation(opspec)
22173	return op.Output(0)
22174}
22175
22176// Computes the reciprocal of x element-wise.
22177//
22178// I.e., \\(y = 1 / x\\).
22179func Inv(scope *Scope, x tf.Output) (y tf.Output) {
22180	if scope.Err() != nil {
22181		return
22182	}
22183	opspec := tf.OpSpec{
22184		Type: "Inv",
22185		Input: []tf.Input{
22186			x,
22187		},
22188	}
22189	op := scope.AddOperation(opspec)
22190	return op.Output(0)
22191}
22192
22193// ComplexAbsAttr is an optional argument to ComplexAbs.
22194type ComplexAbsAttr func(optionalAttr)
22195
22196// ComplexAbsTout sets the optional Tout attribute to value.
22197// If not specified, defaults to DT_FLOAT
22198func ComplexAbsTout(value tf.DataType) ComplexAbsAttr {
22199	return func(m optionalAttr) {
22200		m["Tout"] = value
22201	}
22202}
22203
22204// Computes the complex absolute value of a tensor.
22205//
22206// Given a tensor `x` of complex numbers, this operation returns a tensor of type
22207// `float` or `double` that is the absolute value of each element in `x`. All
22208// elements in `x` must be complex numbers of the form \\(a + bj\\). The absolute
22209// value is computed as \\( \sqrt{a^2 + b^2}\\).
22210func ComplexAbs(scope *Scope, x tf.Output, optional ...ComplexAbsAttr) (y tf.Output) {
22211	if scope.Err() != nil {
22212		return
22213	}
22214	attrs := map[string]interface{}{}
22215	for _, a := range optional {
22216		a(attrs)
22217	}
22218	opspec := tf.OpSpec{
22219		Type: "ComplexAbs",
22220		Input: []tf.Input{
22221			x,
22222		},
22223		Attrs: attrs,
22224	}
22225	op := scope.AddOperation(opspec)
22226	return op.Output(0)
22227}
22228
22229// Computes the absolute value of a tensor.
22230//
22231// Given a tensor `x`, this operation returns a tensor containing the absolute
22232// value of each element in `x`. For example, if x is an input element and y is
22233// an output element, this operation computes \\(y = |x|\\).
22234func Abs(scope *Scope, x tf.Output) (y tf.Output) {
22235	if scope.Err() != nil {
22236		return
22237	}
22238	opspec := tf.OpSpec{
22239		Type: "Abs",
22240		Input: []tf.Input{
22241			x,
22242		},
22243	}
22244	op := scope.AddOperation(opspec)
22245	return op.Output(0)
22246}
22247
22248// Produces a summary of any statistics recorded by the given statistics manager.
22249func ExperimentalStatsAggregatorSummary(scope *Scope, iterator tf.Output) (summary tf.Output) {
22250	if scope.Err() != nil {
22251		return
22252	}
22253	opspec := tf.OpSpec{
22254		Type: "ExperimentalStatsAggregatorSummary",
22255		Input: []tf.Input{
22256			iterator,
22257		},
22258	}
22259	op := scope.AddOperation(opspec)
22260	return op.Output(0)
22261}
22262
22263// RandomStandardNormalAttr is an optional argument to RandomStandardNormal.
22264type RandomStandardNormalAttr func(optionalAttr)
22265
22266// RandomStandardNormalSeed sets the optional seed attribute to value.
22267//
22268// value: If either `seed` or `seed2` are set to be non-zero, the random number
22269// generator is seeded by the given seed.  Otherwise, it is seeded by a
22270// random seed.
22271// If not specified, defaults to 0
22272func RandomStandardNormalSeed(value int64) RandomStandardNormalAttr {
22273	return func(m optionalAttr) {
22274		m["seed"] = value
22275	}
22276}
22277
22278// RandomStandardNormalSeed2 sets the optional seed2 attribute to value.
22279//
22280// value: A second seed to avoid seed collision.
22281// If not specified, defaults to 0
22282func RandomStandardNormalSeed2(value int64) RandomStandardNormalAttr {
22283	return func(m optionalAttr) {
22284		m["seed2"] = value
22285	}
22286}
22287
22288// Outputs random values from a normal distribution.
22289//
22290// The generated values will have mean 0 and standard deviation 1.
22291//
22292// Arguments:
22293//	shape: The shape of the output tensor.
22294//	dtype: The type of the output.
22295//
22296// Returns A tensor of the specified shape filled with random normal values.
22297func RandomStandardNormal(scope *Scope, shape tf.Output, dtype tf.DataType, optional ...RandomStandardNormalAttr) (output tf.Output) {
22298	if scope.Err() != nil {
22299		return
22300	}
22301	attrs := map[string]interface{}{"dtype": dtype}
22302	for _, a := range optional {
22303		a(attrs)
22304	}
22305	opspec := tf.OpSpec{
22306		Type: "RandomStandardNormal",
22307		Input: []tf.Input{
22308			shape,
22309		},
22310		Attrs: attrs,
22311	}
22312	op := scope.AddOperation(opspec)
22313	return op.Output(0)
22314}
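
// An editorial sketch of seeding the generator deterministically via the two
// optional seed attributes; the shape is passed as an int32 tensor:
func exampleRandomStandardNormal(s *Scope) tf.Output {
	shape := Const(s, []int32{2, 3})
	return RandomStandardNormal(s, shape, tf.Float,
		RandomStandardNormalSeed(42), RandomStandardNormalSeed2(7))
}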
22315
22316// Computes the Gauss error function of `x` element-wise.
22317func Erf(scope *Scope, x tf.Output) (y tf.Output) {
22318	if scope.Err() != nil {
22319		return
22320	}
22321	opspec := tf.OpSpec{
22322		Type: "Erf",
22323		Input: []tf.Input{
22324			x,
22325		},
22326	}
22327	op := scope.AddOperation(opspec)
22328	return op.Output(0)
22329}
22330
22331// Computes the maximum along segments of a tensor.
22332//
22333// Read
22334// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)
22335// for an explanation of segments.
22336//
22337// Computes a tensor such that
22338// \\(output_i = \max_j(data_j)\\) where `max` is over `j` such
22339// that `segment_ids[j] == i`.
22340//
22341// If the max is empty for a given segment ID `i`, `output[i] = 0`.
22342//
22343// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
22344// <img style="width:100%" src="https://www.tensorflow.org/images/SegmentMax.png" alt>
22345// </div>
22346//
22347// For example:
22348//
22349// ```
22350// c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]])
22351// tf.segment_max(c, tf.constant([0, 0, 1]))
22352// # ==> [[4, 3, 3, 4],
22353// #      [5, 6, 7, 8]]
22354// ```
22355//
22356//
22357// Arguments:
22358//
22359//	segment_ids: A 1-D tensor whose size is equal to the size of `data`'s
22360// first dimension.  Values should be sorted and can be repeated.
22361//
22362// Returns Has same shape as data, except for dimension 0 which
22363// has size `k`, the number of segments.
22364func SegmentMax(scope *Scope, data tf.Output, segment_ids tf.Output) (output tf.Output) {
22365	if scope.Err() != nil {
22366		return
22367	}
22368	opspec := tf.OpSpec{
22369		Type: "SegmentMax",
22370		Input: []tf.Input{
22371			data, segment_ids,
22372		},
22373	}
22374	op := scope.AddOperation(opspec)
22375	return op.Output(0)
22376}
22377
22378// CastAttr is an optional argument to Cast.
22379type CastAttr func(optionalAttr)
22380
22381// CastTruncate sets the optional Truncate attribute to value.
22382// If not specified, defaults to false
22383func CastTruncate(value bool) CastAttr {
22384	return func(m optionalAttr) {
22385		m["Truncate"] = value
22386	}
22387}
22388
22389// Cast x of type SrcT to y of DstT.
22390func Cast(scope *Scope, x tf.Output, DstT tf.DataType, optional ...CastAttr) (y tf.Output) {
22391	if scope.Err() != nil {
22392		return
22393	}
22394	attrs := map[string]interface{}{"DstT": DstT}
22395	for _, a := range optional {
22396		a(attrs)
22397	}
22398	opspec := tf.OpSpec{
22399		Type: "Cast",
22400		Input: []tf.Input{
22401			x,
22402		},
22403		Attrs: attrs,
22404	}
22405	op := scope.AddOperation(opspec)
22406	return op.Output(0)
22407}
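
// An editorial sketch: casting float32 to int32 truncates toward zero. The
// optional CastTruncate attribute is left at its default here.
func exampleCast(s *Scope) tf.Output {
	x := Const(s, []float32{1.8, 2.2})
	return Cast(s, x, tf.Int32) // ==> [1, 2]
}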
22408
22409// Returns 0 if x == 0, and x / y otherwise, elementwise.
22410func Xdivy(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
22411	if scope.Err() != nil {
22412		return
22413	}
22414	opspec := tf.OpSpec{
22415		Type: "Xdivy",
22416		Input: []tf.Input{
22417			x, y,
22418		},
22419	}
22420	op := scope.AddOperation(opspec)
22421	return op.Output(0)
22422}
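
// An editorial sketch: Xdivy returns 0 where x == 0 instead of the NaN that a
// plain 0/0 division would produce.
func exampleXdivy(s *Scope) tf.Output {
	x := Const(s, []float32{0, 4})
	y := Const(s, []float32{0, 2})
	return Xdivy(s, x, y) // ==> [0, 2]
}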
22423
22424// ResourceApplyAdamWithAmsgradAttr is an optional argument to ResourceApplyAdamWithAmsgrad.
22425type ResourceApplyAdamWithAmsgradAttr func(optionalAttr)
22426
22427// ResourceApplyAdamWithAmsgradUseLocking sets the optional use_locking attribute to value.
22428//
22429// value: If `True`, updating of the var, m, and v tensors will be protected
22430// by a lock; otherwise the behavior is undefined, but may exhibit less
22431// contention.
22432// If not specified, defaults to false
22433func ResourceApplyAdamWithAmsgradUseLocking(value bool) ResourceApplyAdamWithAmsgradAttr {
22434	return func(m optionalAttr) {
22435		m["use_locking"] = value
22436	}
22437}
22438
22439// Update '*var' according to the Adam algorithm.
22440//
// $$\text{lr}_t := \mathrm{learning\_rate} * \sqrt{1 - \beta_2^t} / (1 - \beta_1^t)$$
22442// $$m_t := \beta_1 * m_{t-1} + (1 - \beta_1) * g$$
22443// $$v_t := \beta_2 * v_{t-1} + (1 - \beta_2) * g * g$$
// $$\hat{v}_t := \max\{\hat{v}_{t-1}, v_t\}$$
22445// $$\text{variable} := \text{variable} - \text{lr}_t * m_t / (\sqrt{\hat{v}_t} + \epsilon)$$
22446//
22447// Arguments:
22448//	var_: Should be from a Variable().
22449//	m: Should be from a Variable().
22450//	v: Should be from a Variable().
22451//	vhat: Should be from a Variable().
22452//	beta1_power: Must be a scalar.
22453//	beta2_power: Must be a scalar.
22454//	lr: Scaling factor. Must be a scalar.
22455//	beta1: Momentum factor. Must be a scalar.
22456//	beta2: Momentum factor. Must be a scalar.
22457//	epsilon: Ridge term. Must be a scalar.
22458//	grad: The gradient.
22459//
22460// Returns the created operation.
22461func ResourceApplyAdamWithAmsgrad(scope *Scope, var_ tf.Output, m tf.Output, v tf.Output, vhat tf.Output, beta1_power tf.Output, beta2_power tf.Output, lr tf.Output, beta1 tf.Output, beta2 tf.Output, epsilon tf.Output, grad tf.Output, optional ...ResourceApplyAdamWithAmsgradAttr) (o *tf.Operation) {
22462	if scope.Err() != nil {
22463		return
22464	}
22465	attrs := map[string]interface{}{}
22466	for _, a := range optional {
22467		a(attrs)
22468	}
22469	opspec := tf.OpSpec{
22470		Type: "ResourceApplyAdamWithAmsgrad",
22471		Input: []tf.Input{
22472			var_, m, v, vhat, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad,
22473		},
22474		Attrs: attrs,
22475	}
22476	return scope.AddOperation(opspec)
22477}
22478
22479// Computes the sum along segments of a tensor.
22480//
22481// Read
22482// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)
22483// for an explanation of segments.
22484//
22485// Computes a tensor such that
22486// \\(output_i = \sum_j data_j\\) where sum is over `j` such
22487// that `segment_ids[j] == i`.
22488//
22489// If the sum is empty for a given segment ID `i`, `output[i] = 0`.
22490//
22491// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
22492// <img style="width:100%" src="https://www.tensorflow.org/images/SegmentSum.png" alt>
22493// </div>
22494//
22495// For example:
22496//
22497// ```
22498// c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]])
22499// tf.segment_sum(c, tf.constant([0, 0, 1]))
22500// # ==> [[5, 5, 5, 5],
22501// #      [5, 6, 7, 8]]
22502// ```
22503//
22504//
22505// Arguments:
22506//
22507//	segment_ids: A 1-D tensor whose size is equal to the size of `data`'s
22508// first dimension.  Values should be sorted and can be repeated.
22509//
22510// Returns Has same shape as data, except for dimension 0 which
22511// has size `k`, the number of segments.
22512func SegmentSum(scope *Scope, data tf.Output, segment_ids tf.Output) (output tf.Output) {
22513	if scope.Err() != nil {
22514		return
22515	}
22516	opspec := tf.OpSpec{
22517		Type: "SegmentSum",
22518		Input: []tf.Input{
22519			data, segment_ids,
22520		},
22521	}
22522	op := scope.AddOperation(opspec)
22523	return op.Output(0)
22524}
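
// The documented example expressed with this wrapper (editorial sketch):
func exampleSegmentSum(s *Scope) tf.Output {
	c := Const(s, [][]int32{{1, 2, 3, 4}, {4, 3, 2, 1}, {5, 6, 7, 8}})
	ids := Const(s, []int32{0, 0, 1})
	return SegmentSum(s, c, ids) // ==> [[5 5 5 5] [5 6 7 8]]
}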
22525
22526// Compute the pairwise cross product.
22527//
22528// `a` and `b` must be the same shape; they can either be simple 3-element vectors,
22529// or any shape where the innermost dimension is 3. In the latter case, each pair
22530// of corresponding 3-element vectors is cross-multiplied independently.
22531//
22532// Arguments:
22533//	a: A tensor containing 3-element vectors.
22534//	b: Another tensor, of same type and shape as `a`.
22535//
22536// Returns Pairwise cross product of the vectors in `a` and `b`.
22537func Cross(scope *Scope, a tf.Output, b tf.Output) (product tf.Output) {
22538	if scope.Err() != nil {
22539		return
22540	}
22541	opspec := tf.OpSpec{
22542		Type: "Cross",
22543		Input: []tf.Input{
22544			a, b,
22545		},
22546	}
22547	op := scope.AddOperation(opspec)
22548	return op.Output(0)
22549}
22550
22551// Sends `input` to all devices that are connected to the output.
22552//
22555// The graph should be constructed so that all ops connected to the output have a
22556// valid device assignment, and the op itself is assigned one of these devices.
22557//
22558// input: The input to the broadcast.
22559// output: The same as input.
22560// shape: The shape of the input tensor.
22561//
22562func NcclBroadcast(scope *Scope, input tf.Output, shape tf.Shape) (output tf.Output) {
22563	if scope.Err() != nil {
22564		return
22565	}
22566	attrs := map[string]interface{}{"shape": shape}
22567	opspec := tf.OpSpec{
22568		Type: "NcclBroadcast",
22569		Input: []tf.Input{
22570			input,
22571		},
22572		Attrs: attrs,
22573	}
22574	op := scope.AddOperation(opspec)
22575	return op.Output(0)
22576}
22577
22578// Conv2DAttr is an optional argument to Conv2D.
22579type Conv2DAttr func(optionalAttr)
22580
22581// Conv2DUseCudnnOnGpu sets the optional use_cudnn_on_gpu attribute to value.
22582// If not specified, defaults to true
22583func Conv2DUseCudnnOnGpu(value bool) Conv2DAttr {
22584	return func(m optionalAttr) {
22585		m["use_cudnn_on_gpu"] = value
22586	}
22587}
22588
22589// Conv2DExplicitPaddings sets the optional explicit_paddings attribute to value.
22590//
22591// value: If `padding` is `"EXPLICIT"`, the list of explicit padding amounts. For the ith
22592// dimension, the amount of padding inserted before and after the dimension is
22593// `explicit_paddings[2 * i]` and `explicit_paddings[2 * i + 1]`, respectively. If
22594// `padding` is not `"EXPLICIT"`, `explicit_paddings` must be empty.
22595// If not specified, defaults to <>
22596func Conv2DExplicitPaddings(value []int64) Conv2DAttr {
22597	return func(m optionalAttr) {
22598		m["explicit_paddings"] = value
22599	}
22600}
22601
22602// Conv2DDataFormat sets the optional data_format attribute to value.
22603//
22604// value: Specify the data format of the input and output data. With the
22605// default format "NHWC", the data is stored in the order of:
22606//     [batch, height, width, channels].
22607// Alternatively, the format could be "NCHW", the data storage order of:
22608//     [batch, channels, height, width].
22609// If not specified, defaults to "NHWC"
22610func Conv2DDataFormat(value string) Conv2DAttr {
22611	return func(m optionalAttr) {
22612		m["data_format"] = value
22613	}
22614}
22615
22616// Conv2DDilations sets the optional dilations attribute to value.
22617//
22618// value: 1-D tensor of length 4.  The dilation factor for each dimension of
22619// `input`. If set to k > 1, there will be k-1 skipped cells between each
22620// filter element on that dimension. The dimension order is determined by the
22621// value of `data_format`, see above for details. Dilations in the batch and
22622// depth dimensions must be 1.
22623// If not specified, defaults to <i:1 i:1 i:1 i:1 >
22624func Conv2DDilations(value []int64) Conv2DAttr {
22625	return func(m optionalAttr) {
22626		m["dilations"] = value
22627	}
22628}
22629
22630// Computes a 2-D convolution given 4-D `input` and `filter` tensors.
22631//
22632// Given an input tensor of shape `[batch, in_height, in_width, in_channels]`
22633// and a filter / kernel tensor of shape
22634// `[filter_height, filter_width, in_channels, out_channels]`, this op
22635// performs the following:
22636//
22637// 1. Flattens the filter to a 2-D matrix with shape
22638//    `[filter_height * filter_width * in_channels, output_channels]`.
22639// 2. Extracts image patches from the input tensor to form a *virtual*
22640//    tensor of shape `[batch, out_height, out_width,
22641//    filter_height * filter_width * in_channels]`.
22642// 3. For each patch, right-multiplies the filter matrix and the image patch
22643//    vector.
22644//
22645// In detail, with the default NHWC format,
22646//
22647//     output[b, i, j, k] =
22648//         sum_{di, dj, q} input[b, strides[1] * i + di, strides[2] * j + dj, q] *
22649//                         filter[di, dj, q, k]
22650//
// Must have `strides[0] = strides[3] = 1`.  For the most common case of the same
// horizontal and vertical strides, `strides = [1, stride, stride, 1]`.
22653//
22654// Arguments:
22655//	input: A 4-D tensor. The dimension order is interpreted according to the value
22656// of `data_format`, see below for details.
22657//	filter: A 4-D tensor of shape
22658// `[filter_height, filter_width, in_channels, out_channels]`
22659//	strides: 1-D tensor of length 4.  The stride of the sliding window for each
22660// dimension of `input`. The dimension order is determined by the value of
22661// `data_format`, see below for details.
22662//	padding: The type of padding algorithm to use.
22663//
22664// Returns A 4-D tensor. The dimension order is determined by the value of
22665// `data_format`, see below for details.
22666func Conv2D(scope *Scope, input tf.Output, filter tf.Output, strides []int64, padding string, optional ...Conv2DAttr) (output tf.Output) {
22667	if scope.Err() != nil {
22668		return
22669	}
22670	attrs := map[string]interface{}{"strides": strides, "padding": padding}
22671	for _, a := range optional {
22672		a(attrs)
22673	}
22674	opspec := tf.OpSpec{
22675		Type: "Conv2D",
22676		Input: []tf.Input{
22677			input, filter,
22678		},
22679		Attrs: attrs,
22680	}
22681	op := scope.AddOperation(opspec)
22682	return op.Output(0)
22683}
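
// An editorial sketch of a stride-2 "SAME" convolution in the default NHWC
// layout; img and filt are assumed to be 4-D tensors built elsewhere.
func exampleConv2D(s *Scope, img, filt tf.Output) tf.Output {
	// strides[0] and strides[3] must be 1; stride 2 in height and width.
	return Conv2D(s, img, filt, []int64{1, 2, 2, 1}, "SAME",
		Conv2DDataFormat("NHWC"))
}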
22684
22685// Returns locations of nonzero / true values in a tensor.
22686//
22687// This operation returns the coordinates of true elements in `condition`. The
22688// coordinates are returned in a 2-D tensor where the first dimension (rows)
22689// represents the number of true elements, and the second dimension (columns)
22690// represents the coordinates of the true elements. Keep in mind, the shape of
22691// the output tensor can vary depending on how many true values there are in
22692// `condition`. Indices are output in row-major order.
22693//
22694// For example:
22695//
22696// ```
22697// # 'input' tensor is [[True, False]
22698// #                    [True, False]]
22699// # 'input' has two true values, so output has two coordinates.
22700// # 'input' has rank of 2, so coordinates have two indices.
22701// where(input) ==> [[0, 0],
22702//                   [1, 0]]
22703//
22704// # `condition` tensor is [[[True, False]
22705// #                     [True, False]]
22706// #                    [[False, True]
22707// #                     [False, True]]
22708// #                    [[False, False]
22709// #                     [False, True]]]
22710// # 'input' has 5 true values, so output has 5 coordinates.
22711// # 'input' has rank of 3, so coordinates have three indices.
22712// where(input) ==> [[0, 0, 0],
22713//                   [0, 1, 0],
22714//                   [1, 0, 1],
22715//                   [1, 1, 1],
22716//                   [2, 1, 1]]
22717//
22718// # `condition` tensor is [[[1.5,  0.0]
22719// #                     [-0.5, 0.0]]
22720// #                    [[0.0,  0.25]
22721// #                     [0.0,  0.75]]
22722// #                    [[0.0,  0.0]
22723// #                     [0.0,  0.01]]]
22724// # 'input' has 5 nonzero values, so output has 5 coordinates.
22725// # 'input' has rank of 3, so coordinates have three indices.
22726// where(input) ==> [[0, 0, 0],
22727//                   [0, 1, 0],
22728//                   [1, 0, 1],
22729//                   [1, 1, 1],
22730//                   [2, 1, 1]]
22731//
22732// # `condition` tensor is [[[1.5 + 0.0j, 0.0  + 0.0j]
22733// #                     [0.0 + 0.5j, 0.0  + 0.0j]]
22734// #                    [[0.0 + 0.0j, 0.25 + 1.5j]
22735// #                     [0.0 + 0.0j, 0.75 + 0.0j]]
22736// #                    [[0.0 + 0.0j, 0.0  + 0.0j]
22737// #                     [0.0 + 0.0j, 0.01 + 0.0j]]]
22738// # 'input' has 5 nonzero magnitude values, so output has 5 coordinates.
22739// # 'input' has rank of 3, so coordinates have three indices.
22740// where(input) ==> [[0, 0, 0],
22741//                   [0, 1, 0],
22742//                   [1, 0, 1],
22743//                   [1, 1, 1],
22744//                   [2, 1, 1]]
22745// ```
22746func Where(scope *Scope, condition tf.Output) (index tf.Output) {
22747	if scope.Err() != nil {
22748		return
22749	}
22750	opspec := tf.OpSpec{
22751		Type: "Where",
22752		Input: []tf.Input{
22753			condition,
22754		},
22755	}
22756	op := scope.AddOperation(opspec)
22757	return op.Output(0)
22758}
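
// The first documented example expressed with this wrapper (editorial sketch);
// the result holds int64 coordinates, one row per true element:
func exampleWhere(s *Scope) tf.Output {
	cond := Const(s, [][]bool{{true, false}, {true, false}})
	return Where(s, cond) // ==> [[0 0] [1 0]]
}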
22759
22760// SvdAttr is an optional argument to Svd.
22761type SvdAttr func(optionalAttr)
22762
22763// SvdComputeUv sets the optional compute_uv attribute to value.
22764//
22765// value: If true, left and right singular vectors will be
22766// computed and returned in `u` and `v`, respectively.
// If false, `u` and `v` are not set and should never be referenced.
22768// If not specified, defaults to true
22769func SvdComputeUv(value bool) SvdAttr {
22770	return func(m optionalAttr) {
22771		m["compute_uv"] = value
22772	}
22773}
22774
22775// SvdFullMatrices sets the optional full_matrices attribute to value.
22776//
22777// value: If true, compute full-sized `u` and `v`. If false
22778// (the default), compute only the leading `P` singular vectors.
22779// Ignored if `compute_uv` is `False`.
22780// If not specified, defaults to false
22781func SvdFullMatrices(value bool) SvdAttr {
22782	return func(m optionalAttr) {
22783		m["full_matrices"] = value
22784	}
22785}
22786
22787// Computes the singular value decompositions of one or more matrices.
22788//
22789// Computes the SVD of each inner matrix in `input` such that
22790// `input[..., :, :] = u[..., :, :] * diag(s[..., :, :]) * transpose(v[..., :, :])`
22791//
22792// ```python
22793// # a is a tensor containing a batch of matrices.
22794// # s is a tensor of singular values for each matrix.
22795// # u is the tensor containing the left singular vectors for each matrix.
22796// # v is the tensor containing the right singular vectors for each matrix.
22797// s, u, v = svd(a)
22798// s, _, _ = svd(a, compute_uv=False)
22799// ```
22800//
22801// Arguments:
22802//	input: A tensor of shape `[..., M, N]` whose inner-most 2 dimensions
22803// form matrices of size `[M, N]`. Let `P` be the minimum of `M` and `N`.
22804//
22805// Returns:
22806//	s: Singular values. Shape is `[..., P]`.
22807//	u: Left singular vectors. If `full_matrices` is `False` then shape is
22808// `[..., M, P]`; if `full_matrices` is `True` then shape is
22809// `[..., M, M]`. Undefined if `compute_uv` is `False`.
//	v: Right singular vectors. If `full_matrices` is `False` then shape is
22811// `[..., N, P]`. If `full_matrices` is `True` then shape is `[..., N, N]`.
22812// Undefined if `compute_uv` is false.
22813func Svd(scope *Scope, input tf.Output, optional ...SvdAttr) (s tf.Output, u tf.Output, v tf.Output) {
22814	if scope.Err() != nil {
22815		return
22816	}
22817	attrs := map[string]interface{}{}
22818	for _, a := range optional {
22819		a(attrs)
22820	}
22821	opspec := tf.OpSpec{
22822		Type: "Svd",
22823		Input: []tf.Input{
22824			input,
22825		},
22826		Attrs: attrs,
22827	}
22828	op := scope.AddOperation(opspec)
22829	return op.Output(0), op.Output(1), op.Output(2)
22830}
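
// An editorial sketch mirroring the Python example above: a values-only
// decomposition followed by a thin (P-column) full decomposition.
func exampleSvd(s *Scope, a tf.Output) (tf.Output, tf.Output, tf.Output) {
	// Singular values only; u and v are undefined and must not be referenced.
	sOnly, _, _ := Svd(s, a, SvdComputeUv(false))
	_ = sOnly
	// Full decomposition with thin singular vectors.
	return Svd(s, a, SvdComputeUv(true), SvdFullMatrices(false))
}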
22831
22832// Computes gradients for SparseSegmentMean.
22833//
22834// Returns tensor "output" with same shape as grad, except for dimension 0 whose
22835// value is output_dim0.
22836//
22837// Arguments:
22838//	grad: gradient propagated to the SparseSegmentMean op.
22839//	indices: indices passed to the corresponding SparseSegmentMean op.
22840//	segment_ids: segment_ids passed to the corresponding SparseSegmentMean op.
22841//	output_dim0: dimension 0 of "data" passed to SparseSegmentMean op.
22842func SparseSegmentMeanGrad(scope *Scope, grad tf.Output, indices tf.Output, segment_ids tf.Output, output_dim0 tf.Output) (output tf.Output) {
22843	if scope.Err() != nil {
22844		return
22845	}
22846	opspec := tf.OpSpec{
22847		Type: "SparseSegmentMeanGrad",
22848		Input: []tf.Input{
22849			grad, indices, segment_ids, output_dim0,
22850		},
22851	}
22852	op := scope.AddOperation(opspec)
22853	return op.Output(0)
22854}
22855
22856// Reduces `input` from `num_devices` using `reduction` to a single device.
22857//
22860// The graph should be constructed so that all inputs have a valid device
22861// assignment, and the op itself is assigned one of these devices.
22862//
22863// input: The input to the reduction.
22864// data: the value of the reduction across all `num_devices` devices.
22865// reduction: the reduction operation to perform.
22866func NcclReduce(scope *Scope, input []tf.Output, reduction string) (data tf.Output) {
22867	if scope.Err() != nil {
22868		return
22869	}
22870	attrs := map[string]interface{}{"reduction": reduction}
22871	opspec := tf.OpSpec{
22872		Type: "NcclReduce",
22873		Input: []tf.Input{
22874			tf.OutputList(input),
22875		},
22876		Attrs: attrs,
22877	}
22878	op := scope.AddOperation(opspec)
22879	return op.Output(0)
22880}
22881
22882// An op to receive a tensor from the host.
22883//
22884// output: the tensor that will be received from the host.
22885// Toutput: element type for output.
22886// shape: shape for output.
22887// key: A unique identifier for this region used to match up host transfers.
22888func XlaRecvFromHost(scope *Scope, Toutput tf.DataType, shape tf.Shape, key string) (output tf.Output) {
22889	if scope.Err() != nil {
22890		return
22891	}
22892	attrs := map[string]interface{}{"Toutput": Toutput, "shape": shape, "key": key}
22893	opspec := tf.OpSpec{
22894		Type: "XlaRecvFromHost",
22895
22896		Attrs: attrs,
22897	}
22898	op := scope.AddOperation(opspec)
22899	return op.Output(0)
22900}
22901
22902// QuantizedDepthwiseConv2DWithBiasAndReluAttr is an optional argument to QuantizedDepthwiseConv2DWithBiasAndRelu.
22903type QuantizedDepthwiseConv2DWithBiasAndReluAttr func(optionalAttr)
22904
22905// QuantizedDepthwiseConv2DWithBiasAndReluOutType sets the optional out_type attribute to value.
22906//
22907// value: The type of the output.
22908// If not specified, defaults to DT_QINT32
22909func QuantizedDepthwiseConv2DWithBiasAndReluOutType(value tf.DataType) QuantizedDepthwiseConv2DWithBiasAndReluAttr {
22910	return func(m optionalAttr) {
22911		m["out_type"] = value
22912	}
22913}
22914
22915// QuantizedDepthwiseConv2DWithBiasAndReluDilations sets the optional dilations attribute to value.
22916//
22917// value: List of dilation values.
22918// If not specified, defaults to <i:1 i:1 i:1 i:1 >
22919func QuantizedDepthwiseConv2DWithBiasAndReluDilations(value []int64) QuantizedDepthwiseConv2DWithBiasAndReluAttr {
22920	return func(m optionalAttr) {
22921		m["dilations"] = value
22922	}
22923}
22924
22925// QuantizedDepthwiseConv2DWithBiasAndReluPaddingList sets the optional padding_list attribute to value.
22926// If not specified, defaults to <>
22927func QuantizedDepthwiseConv2DWithBiasAndReluPaddingList(value []int64) QuantizedDepthwiseConv2DWithBiasAndReluAttr {
22928	return func(m optionalAttr) {
22929		m["padding_list"] = value
22930	}
22931}
22932
22933// Computes quantized depthwise Conv2D with Bias and Relu.
22934//
22935// Arguments:
22936//	input: The original input tensor.
22937//	filter: The original filter tensor.
22938//	bias: The original bias tensor.
22939//	min_input: The float value that the minimum quantized input value represents.
22940//	max_input: The float value that the maximum quantized input value represents.
22941//	min_filter: The float value that the minimum quantized filter value represents.
22942//	max_filter: The float value that the maximum quantized filter value represents.
22943//	strides: List of stride values.
22944//
22945//
22946// Returns:
22947//	output: The output tensor.
22948//	min_output: The float value that the minimum quantized output value represents.
22949//	max_output: The float value that the maximum quantized output value represents.
22950func QuantizedDepthwiseConv2DWithBiasAndRelu(scope *Scope, input tf.Output, filter tf.Output, bias tf.Output, min_input tf.Output, max_input tf.Output, min_filter tf.Output, max_filter tf.Output, strides []int64, padding string, optional ...QuantizedDepthwiseConv2DWithBiasAndReluAttr) (output tf.Output, min_output tf.Output, max_output tf.Output) {
22951	if scope.Err() != nil {
22952		return
22953	}
22954	attrs := map[string]interface{}{"strides": strides, "padding": padding}
22955	for _, a := range optional {
22956		a(attrs)
22957	}
22958	opspec := tf.OpSpec{
22959		Type: "QuantizedDepthwiseConv2DWithBiasAndRelu",
22960		Input: []tf.Input{
22961			input, filter, bias, min_input, max_input, min_filter, max_filter,
22962		},
22963		Attrs: attrs,
22964	}
22965	op := scope.AddOperation(opspec)
22966	return op.Output(0), op.Output(1), op.Output(2)
22967}
22968
22969// QuantizedDepthwiseConv2DWithBiasAttr is an optional argument to QuantizedDepthwiseConv2DWithBias.
22970type QuantizedDepthwiseConv2DWithBiasAttr func(optionalAttr)
22971
22972// QuantizedDepthwiseConv2DWithBiasOutType sets the optional out_type attribute to value.
22973//
22974// value: The type of the output.
22975// If not specified, defaults to DT_QINT32
22976func QuantizedDepthwiseConv2DWithBiasOutType(value tf.DataType) QuantizedDepthwiseConv2DWithBiasAttr {
22977	return func(m optionalAttr) {
22978		m["out_type"] = value
22979	}
22980}
22981
22982// QuantizedDepthwiseConv2DWithBiasDilations sets the optional dilations attribute to value.
22983//
22984// value: List of dilation values.
22985// If not specified, defaults to <i:1 i:1 i:1 i:1 >
22986func QuantizedDepthwiseConv2DWithBiasDilations(value []int64) QuantizedDepthwiseConv2DWithBiasAttr {
22987	return func(m optionalAttr) {
22988		m["dilations"] = value
22989	}
22990}
22991
22992// Computes quantized depthwise Conv2D with Bias.
22993//
22994// Arguments:
22995//	input: The original input tensor.
22996//	filter: The original filter tensor.
22997//	bias: The original bias tensor.
22998//	min_input: The float value that the minimum quantized input value represents.
22999//	max_input: The float value that the maximum quantized input value represents.
23000//	min_filter: The float value that the minimum quantized filter value represents.
23001//	max_filter: The float value that the maximum quantized filter value represents.
23002//	strides: List of stride values.
23003//
23004//
23005// Returns:
23006//	output: The output tensor.
23007//	min_output: The float value that the minimum quantized output value represents.
23008//	max_output: The float value that the maximum quantized output value represents.
23009func QuantizedDepthwiseConv2DWithBias(scope *Scope, input tf.Output, filter tf.Output, bias tf.Output, min_input tf.Output, max_input tf.Output, min_filter tf.Output, max_filter tf.Output, strides []int64, padding string, optional ...QuantizedDepthwiseConv2DWithBiasAttr) (output tf.Output, min_output tf.Output, max_output tf.Output) {
23010	if scope.Err() != nil {
23011		return
23012	}
23013	attrs := map[string]interface{}{"strides": strides, "padding": padding}
23014	for _, a := range optional {
23015		a(attrs)
23016	}
23017	opspec := tf.OpSpec{
23018		Type: "QuantizedDepthwiseConv2DWithBias",
23019		Input: []tf.Input{
23020			input, filter, bias, min_input, max_input, min_filter, max_filter,
23021		},
23022		Attrs: attrs,
23023	}
23024	op := scope.AddOperation(opspec)
23025	return op.Output(0), op.Output(1), op.Output(2)
23026}
23027
23028// QuantizedDepthwiseConv2DAttr is an optional argument to QuantizedDepthwiseConv2D.
23029type QuantizedDepthwiseConv2DAttr func(optionalAttr)
23030
23031// QuantizedDepthwiseConv2DOutType sets the optional out_type attribute to value.
23032//
23033// value: The type of the output.
23034// If not specified, defaults to DT_QINT32
23035func QuantizedDepthwiseConv2DOutType(value tf.DataType) QuantizedDepthwiseConv2DAttr {
23036	return func(m optionalAttr) {
23037		m["out_type"] = value
23038	}
23039}
23040
23041// QuantizedDepthwiseConv2DDilations sets the optional dilations attribute to value.
23042//
23043// value: List of dilation values.
23044// If not specified, defaults to <i:1 i:1 i:1 i:1 >
23045func QuantizedDepthwiseConv2DDilations(value []int64) QuantizedDepthwiseConv2DAttr {
23046	return func(m optionalAttr) {
23047		m["dilations"] = value
23048	}
23049}
23050
23051// Computes quantized depthwise Conv2D.
23052//
23053// Arguments:
23054//	input: The original input tensor.
23055//	filter: The original filter tensor.
23056//	min_input: The float value that the minimum quantized input value represents.
23057//	max_input: The float value that the maximum quantized input value represents.
23058//	min_filter: The float value that the minimum quantized filter value represents.
23059//	max_filter: The float value that the maximum quantized filter value represents.
23060//	strides: List of stride values.
23061//
23062//
23063// Returns:
23064//	output: The output tensor.
23065//	min_output: The float value that the minimum quantized output value represents.
23066//	max_output: The float value that the maximum quantized output value represents.
23067func QuantizedDepthwiseConv2D(scope *Scope, input tf.Output, filter tf.Output, min_input tf.Output, max_input tf.Output, min_filter tf.Output, max_filter tf.Output, strides []int64, padding string, optional ...QuantizedDepthwiseConv2DAttr) (output tf.Output, min_output tf.Output, max_output tf.Output) {
23068	if scope.Err() != nil {
23069		return
23070	}
23071	attrs := map[string]interface{}{"strides": strides, "padding": padding}
23072	for _, a := range optional {
23073		a(attrs)
23074	}
23075	opspec := tf.OpSpec{
23076		Type: "QuantizedDepthwiseConv2D",
23077		Input: []tf.Input{
23078			input, filter, min_input, max_input, min_filter, max_filter,
23079		},
23080		Attrs: attrs,
23081	}
23082	op := scope.AddOperation(opspec)
23083	return op.Output(0), op.Output(1), op.Output(2)
23084}
23085
23086// QuantizedConv2DPerChannelAttr is an optional argument to QuantizedConv2DPerChannel.
23087type QuantizedConv2DPerChannelAttr func(optionalAttr)
23088
23089// QuantizedConv2DPerChannelOutType sets the optional out_type attribute to value.
23090//
23091// value: The quantized type of output tensor that needs to be converted.
23092// If not specified, defaults to DT_QINT32
23093func QuantizedConv2DPerChannelOutType(value tf.DataType) QuantizedConv2DPerChannelAttr {
23094	return func(m optionalAttr) {
23095		m["out_type"] = value
23096	}
23097}
23098
23099// QuantizedConv2DPerChannelDilations sets the optional dilations attribute to value.
23100//
23101// value: list of dilation values.
23102// If not specified, defaults to <i:1 i:1 i:1 i:1 >
23103func QuantizedConv2DPerChannelDilations(value []int64) QuantizedConv2DPerChannelAttr {
23104	return func(m optionalAttr) {
23105		m["dilations"] = value
23106	}
23107}
23108
23109// Computes QuantizedConv2D per channel.
23110//
23111// Arguments:
23112//	input: The original input tensor.
23113//	filter: The original filter tensor.
23114//	min_input: The minimum value of the input tensor
23115//	max_input: The maximum value of the input tensor.
23116//	min_filter: The minimum value of the filter tensor.
23117//	max_filter: The maximum value of the filter tensor.
23118//	strides: list of stride values.
23119//
23120//
23121// Returns:
23122//	output: The output tensor.
23123//	min_output: The minimum value of the final output tensor.
23124//	max_output: The maximum value of the final output tensor.
23125func QuantizedConv2DPerChannel(scope *Scope, input tf.Output, filter tf.Output, min_input tf.Output, max_input tf.Output, min_filter tf.Output, max_filter tf.Output, strides []int64, padding string, optional ...QuantizedConv2DPerChannelAttr) (output tf.Output, min_output tf.Output, max_output tf.Output) {
23126	if scope.Err() != nil {
23127		return
23128	}
23129	attrs := map[string]interface{}{"strides": strides, "padding": padding}
23130	for _, a := range optional {
23131		a(attrs)
23132	}
23133	opspec := tf.OpSpec{
23134		Type: "QuantizedConv2DPerChannel",
23135		Input: []tf.Input{
23136			input, filter, min_input, max_input, min_filter, max_filter,
23137		},
23138		Attrs: attrs,
23139	}
23140	op := scope.AddOperation(opspec)
23141	return op.Output(0), op.Output(1), op.Output(2)
23142}
23143
23144// A container for a multi device iterator resource.
23145//
23146// Returns:
23147//	handle: A handle to a multi device iterator that can be passed to a
23148// "MultiDeviceIteratorGetNextFromShard" op. In contrast to MultiDeviceIterator,
23149// AnonymousIterator prevents resource sharing by name, and does not keep a
23150// reference to the resource container.
23151//	deleter: A variant deleter that should be passed into the op that deletes the iterator.
23152func AnonymousMultiDeviceIterator(scope *Scope, devices []string, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output, deleter tf.Output) {
23153	if scope.Err() != nil {
23154		return
23155	}
23156	attrs := map[string]interface{}{"devices": devices, "output_types": output_types, "output_shapes": output_shapes}
23157	opspec := tf.OpSpec{
23158		Type: "AnonymousMultiDeviceIterator",
23159
23160		Attrs: attrs,
23161	}
23162	op := scope.AddOperation(opspec)
23163	return op.Output(0), op.Output(1)
23164}
23165
23166// Provides the time since epoch in seconds.
23167//
23168// Returns the timestamp as a `float64` for seconds since the Unix epoch.
23169//
23170// Note: the timestamp is computed when the op is executed, not when it is added
23171// to the graph.
23172func Timestamp(scope *Scope) (ts tf.Output) {
23173	if scope.Err() != nil {
23174		return
23175	}
23176	opspec := tf.OpSpec{
23177		Type: "Timestamp",
23178	}
23179	op := scope.AddOperation(opspec)
23180	return op.Output(0)
23181}
23182
23183// Returns the truth value of (x <= y) element-wise.
23184//
23185// *NOTE*: `LessEqual` supports broadcasting. More about broadcasting
23186// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
23187//
23188// Example:
23189//
23190// ```python
23191// x = tf.constant([5, 4, 6])
23192// y = tf.constant([5])
23193// tf.math.less_equal(x, y) ==> [True, True, False]
23194//
23195// x = tf.constant([5, 4, 6])
23196// y = tf.constant([5, 6, 6])
23197// tf.math.less_equal(x, y) ==> [True, True, True]
23198// ```
23199func LessEqual(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
23200	if scope.Err() != nil {
23201		return
23202	}
23203	opspec := tf.OpSpec{
23204		Type: "LessEqual",
23205		Input: []tf.Input{
23206			x, y,
23207		},
23208	}
23209	op := scope.AddOperation(opspec)
23210	return op.Output(0)
23211}
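
// The broadcasting example above expressed with this wrapper (editorial sketch):
func exampleLessEqual(s *Scope) tf.Output {
	x := Const(s, []int32{5, 4, 6})
	y := Const(s, []int32{5}) // broadcast against x
	return LessEqual(s, x, y) // ==> [true true false]
}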
23212
23213// LoadTPUEmbeddingADAMParametersGradAccumDebugAttr is an optional argument to LoadTPUEmbeddingADAMParametersGradAccumDebug.
23214type LoadTPUEmbeddingADAMParametersGradAccumDebugAttr func(optionalAttr)
23215
23216// LoadTPUEmbeddingADAMParametersGradAccumDebugTableId sets the optional table_id attribute to value.
23217// If not specified, defaults to -1
23218func LoadTPUEmbeddingADAMParametersGradAccumDebugTableId(value int64) LoadTPUEmbeddingADAMParametersGradAccumDebugAttr {
23219	return func(m optionalAttr) {
23220		m["table_id"] = value
23221	}
23222}
23223
23224// LoadTPUEmbeddingADAMParametersGradAccumDebugTableName sets the optional table_name attribute to value.
23225// If not specified, defaults to ""
23226func LoadTPUEmbeddingADAMParametersGradAccumDebugTableName(value string) LoadTPUEmbeddingADAMParametersGradAccumDebugAttr {
23227	return func(m optionalAttr) {
23228		m["table_name"] = value
23229	}
23230}
23231
23232// LoadTPUEmbeddingADAMParametersGradAccumDebugConfig sets the optional config attribute to value.
23233// If not specified, defaults to ""
23234func LoadTPUEmbeddingADAMParametersGradAccumDebugConfig(value string) LoadTPUEmbeddingADAMParametersGradAccumDebugAttr {
23235	return func(m optionalAttr) {
23236		m["config"] = value
23237	}
23238}
23239
23240// Load ADAM embedding parameters with debug support.
23241//
23242// An op that loads optimization parameters into HBM for embedding. Must be
23243// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
23244// embedding table configuration. For example, this op is used to install
23245// parameters that are loaded from a checkpoint before a training loop is
23246// executed.
23247//
23248// Arguments:
23249//	parameters: Value of parameters used in the ADAM optimization algorithm.
23250//	momenta: Value of momenta used in the ADAM optimization algorithm.
23251//	velocities: Value of velocities used in the ADAM optimization algorithm.
23252//	gradient_accumulators: Value of gradient_accumulators used in the ADAM optimization algorithm.
23253//
23254//
23255//
23256// Returns the created operation.
23257func LoadTPUEmbeddingADAMParametersGradAccumDebug(scope *Scope, parameters tf.Output, momenta tf.Output, velocities tf.Output, gradient_accumulators tf.Output, num_shards int64, shard_id int64, optional ...LoadTPUEmbeddingADAMParametersGradAccumDebugAttr) (o *tf.Operation) {
23258	if scope.Err() != nil {
23259		return
23260	}
23261	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
23262	for _, a := range optional {
23263		a(attrs)
23264	}
23265	opspec := tf.OpSpec{
23266		Type: "LoadTPUEmbeddingADAMParametersGradAccumDebug",
23267		Input: []tf.Input{
23268			parameters, momenta, velocities, gradient_accumulators,
23269		},
23270		Attrs: attrs,
23271	}
23272	return scope.AddOperation(opspec)
23273}
23274
23275// RetrieveTPUEmbeddingRMSPropParametersAttr is an optional argument to RetrieveTPUEmbeddingRMSPropParameters.
23276type RetrieveTPUEmbeddingRMSPropParametersAttr func(optionalAttr)
23277
23278// RetrieveTPUEmbeddingRMSPropParametersTableId sets the optional table_id attribute to value.
23279// If not specified, defaults to -1
23280func RetrieveTPUEmbeddingRMSPropParametersTableId(value int64) RetrieveTPUEmbeddingRMSPropParametersAttr {
23281	return func(m optionalAttr) {
23282		m["table_id"] = value
23283	}
23284}
23285
23286// RetrieveTPUEmbeddingRMSPropParametersTableName sets the optional table_name attribute to value.
23287// If not specified, defaults to ""
23288func RetrieveTPUEmbeddingRMSPropParametersTableName(value string) RetrieveTPUEmbeddingRMSPropParametersAttr {
23289	return func(m optionalAttr) {
23290		m["table_name"] = value
23291	}
23292}
23293
23294// RetrieveTPUEmbeddingRMSPropParametersConfig sets the optional config attribute to value.
23295// If not specified, defaults to ""
23296func RetrieveTPUEmbeddingRMSPropParametersConfig(value string) RetrieveTPUEmbeddingRMSPropParametersAttr {
23297	return func(m optionalAttr) {
23298		m["config"] = value
23299	}
23300}
23301
23302// Retrieve RMSProp embedding parameters.
23303//
23304// An op that retrieves optimization parameters from embedding to host
23305// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
23306// the correct embedding table configuration. For example, this op is
23307// used to retrieve updated parameters before saving a checkpoint.
23308//
23309// Returns:
23310//	parameters: Parameter parameters updated by the RMSProp optimization algorithm.
23311//	ms: Parameter ms updated by the RMSProp optimization algorithm.
23312//	mom: Parameter mom updated by the RMSProp optimization algorithm.
23313func RetrieveTPUEmbeddingRMSPropParameters(scope *Scope, num_shards int64, shard_id int64, optional ...RetrieveTPUEmbeddingRMSPropParametersAttr) (parameters tf.Output, ms tf.Output, mom tf.Output) {
23314	if scope.Err() != nil {
23315		return
23316	}
23317	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
23318	for _, a := range optional {
23319		a(attrs)
23320	}
23321	opspec := tf.OpSpec{
23322		Type: "RetrieveTPUEmbeddingRMSPropParameters",
23323
23324		Attrs: attrs,
23325	}
23326	op := scope.AddOperation(opspec)
23327	return op.Output(0), op.Output(1), op.Output(2)
23328}
23329
23330// QuantizedMatMulWithBiasAttr is an optional argument to QuantizedMatMulWithBias.
23331type QuantizedMatMulWithBiasAttr func(optionalAttr)
23332
23333// QuantizedMatMulWithBiasToutput sets the optional Toutput attribute to value.
23334// If not specified, defaults to DT_QINT32
23335func QuantizedMatMulWithBiasToutput(value tf.DataType) QuantizedMatMulWithBiasAttr {
23336	return func(m optionalAttr) {
23337		m["Toutput"] = value
23338	}
23339}
23340
23341// QuantizedMatMulWithBiasTransposeA sets the optional transpose_a attribute to value.
23342//
23343// value: If true, `a` is transposed before multiplication.
23344// If not specified, defaults to false
23345func QuantizedMatMulWithBiasTransposeA(value bool) QuantizedMatMulWithBiasAttr {
23346	return func(m optionalAttr) {
23347		m["transpose_a"] = value
23348	}
23349}
23350
23351// QuantizedMatMulWithBiasTransposeB sets the optional transpose_b attribute to value.
23352//
23353// value: If true, `b` is transposed before multiplication.
23354// If not specified, defaults to false
23355func QuantizedMatMulWithBiasTransposeB(value bool) QuantizedMatMulWithBiasAttr {
23356	return func(m optionalAttr) {
23357		m["transpose_b"] = value
23358	}
23359}
23360
23361// QuantizedMatMulWithBiasInputQuantMode sets the optional input_quant_mode attribute to value.
23362//
23363// value: Input data quantization mode. Either MIN_FIRST(default) or SCALED.
23364// If not specified, defaults to "MIN_FIRST"
23365func QuantizedMatMulWithBiasInputQuantMode(value string) QuantizedMatMulWithBiasAttr {
23366	return func(m optionalAttr) {
23367		m["input_quant_mode"] = value
23368	}
23369}
23370
23371// Performs a quantized matrix multiplication of `a` by the matrix `b` with bias
23372// add.
23373//
// The inputs must be two-dimensional matrices and a 1-D bias vector, and the inner
// dimension of `a` (after being transposed if `transpose_a` is non-zero) must
// match the outer dimension of `b` (after being transposed if `transpose_b` is
// non-zero). The bias values are then broadcast-added to the matrix
// multiplication result. The bias size must match the inner dimension of `b`.
23379//
23380// Arguments:
23381//	a: A matrix to be multiplied. Must be a two-dimensional tensor of type `quint8`.
23382//	b: A matrix to be multiplied and must be a two-dimensional tensor of type `qint8`.
//	bias: A 1-D bias tensor with size matching the inner dimension of `b` (after being
// transposed if `transpose_b` is non-zero).
23385//	min_a: The float value that the lowest quantized `a` value represents.
23386//	max_a: The float value that the highest quantized `a` value represents.
23387//	min_b: The float value that the lowest quantized `b` value represents.
23388//	max_b: The float value that the highest quantized `b` value represents.
23389//
23390// Returns:
23391//	out
23392//	min_out: The float value that the lowest quantized output value represents.
23393//	max_out: The float value that the highest quantized output value represents.
23394func QuantizedMatMulWithBias(scope *Scope, a tf.Output, b tf.Output, bias tf.Output, min_a tf.Output, max_a tf.Output, min_b tf.Output, max_b tf.Output, optional ...QuantizedMatMulWithBiasAttr) (out tf.Output, min_out tf.Output, max_out tf.Output) {
23395	if scope.Err() != nil {
23396		return
23397	}
23398	attrs := map[string]interface{}{}
23399	for _, a := range optional {
23400		a(attrs)
23401	}
23402	opspec := tf.OpSpec{
23403		Type: "QuantizedMatMulWithBias",
23404		Input: []tf.Input{
23405			a, b, bias, min_a, max_a, min_b, max_b,
23406		},
23407		Attrs: attrs,
23408	}
23409	op := scope.AddOperation(opspec)
23410	return op.Output(0), op.Output(1), op.Output(2)
23411}
23412
23413// TensorArrayGatherV2Attr is an optional argument to TensorArrayGatherV2.
23414type TensorArrayGatherV2Attr func(optionalAttr)
23415
23416// TensorArrayGatherV2ElementShape sets the optional element_shape attribute to value.
23417// If not specified, defaults to <unknown_rank:true >
23418func TensorArrayGatherV2ElementShape(value tf.Shape) TensorArrayGatherV2Attr {
23419	return func(m optionalAttr) {
23420		m["element_shape"] = value
23421	}
23422}
23423
23424// Deprecated. Use TensorArrayGatherV3
23425//
23426// DEPRECATED at GraphDef version 26: Use TensorArrayGatherV3
23427func TensorArrayGatherV2(scope *Scope, handle tf.Output, indices tf.Output, flow_in tf.Output, dtype tf.DataType, optional ...TensorArrayGatherV2Attr) (value tf.Output) {
23428	if scope.Err() != nil {
23429		return
23430	}
23431	attrs := map[string]interface{}{"dtype": dtype}
23432	for _, a := range optional {
23433		a(attrs)
23434	}
23435	opspec := tf.OpSpec{
23436		Type: "TensorArrayGatherV2",
23437		Input: []tf.Input{
23438			handle, indices, flow_in,
23439		},
23440		Attrs: attrs,
23441	}
23442	op := scope.AddOperation(opspec)
23443	return op.Output(0)
23444}
23445
23446// RFFT3DAttr is an optional argument to RFFT3D.
23447type RFFT3DAttr func(optionalAttr)
23448
23449// RFFT3DTcomplex sets the optional Tcomplex attribute to value.
23450// If not specified, defaults to DT_COMPLEX64
23451func RFFT3DTcomplex(value tf.DataType) RFFT3DAttr {
23452	return func(m optionalAttr) {
23453		m["Tcomplex"] = value
23454	}
23455}
23456
23457// 3D real-valued fast Fourier transform.
23458//
23459// Computes the 3-dimensional discrete Fourier transform of a real-valued signal
23460// over the inner-most 3 dimensions of `input`.
23461//
23462// Since the DFT of a real signal is Hermitian-symmetric, `RFFT3D` only returns the
23463// `fft_length / 2 + 1` unique components of the FFT for the inner-most dimension
23464// of `output`: the zero-frequency term, followed by the `fft_length / 2`
23465// positive-frequency terms.
23466//
23467// Along each axis `RFFT3D` is computed on, if `fft_length` is smaller than the
23468// corresponding dimension of `input`, the dimension is cropped. If it is larger,
23469// the dimension is padded with zeros.
23470//
23471// Arguments:
23472//	input: A float32 tensor.
23473//	fft_length: An int32 tensor of shape [3]. The FFT length for each dimension.
23474//
23475// Returns A complex64 tensor of the same rank as `input`. The inner-most 3
//   dimensions of `input` are replaced with their 3D Fourier transform. The
23477//   inner-most dimension contains `fft_length / 2 + 1` unique frequency
23478//   components.
23479//
23480// @compatibility(numpy)
23481// Equivalent to np.fft.rfftn with 3 dimensions.
23482// @end_compatibility
23483func RFFT3D(scope *Scope, input tf.Output, fft_length tf.Output, optional ...RFFT3DAttr) (output tf.Output) {
23484	if scope.Err() != nil {
23485		return
23486	}
23487	attrs := map[string]interface{}{}
23488	for _, a := range optional {
23489		a(attrs)
23490	}
23491	opspec := tf.OpSpec{
23492		Type: "RFFT3D",
23493		Input: []tf.Input{
23494			input, fft_length,
23495		},
23496		Attrs: attrs,
23497	}
23498	op := scope.AddOperation(opspec)
23499	return op.Output(0)
23500}

// Reorders a SparseTensor into the canonical, row-major ordering.
//
// Note that by convention, all sparse ops preserve the canonical ordering along
// increasing dimension number. The only time ordering can be violated is during
// manual manipulation of the indices and values vectors to add entries.
//
// Reordering does not affect the shape of the SparseTensor.
//
// If the tensor has rank `R` and `N` non-empty values, `input_indices` has
// shape `[N, R]`, `input_values` has length `N`, and `input_shape` has length `R`.
//
// Arguments:
//	input_indices: 2-D.  `N x R` matrix with the indices of non-empty values in a
// SparseTensor, possibly not in canonical ordering.
//	input_values: 1-D.  `N` non-empty values corresponding to `input_indices`.
//	input_shape: 1-D.  Shape of the input SparseTensor.
//
// Returns:
//	output_indices: 2-D.  `N x R` matrix with the same indices as `input_indices`, but
// in canonical row-major ordering.
//	output_values: 1-D.  `N` non-empty values corresponding to `output_indices`.
func SparseReorder(scope *Scope, input_indices tf.Output, input_values tf.Output, input_shape tf.Output) (output_indices tf.Output, output_values tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "SparseReorder",
		Input: []tf.Input{
			input_indices, input_values, input_shape,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1)
}
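
// Illustrative sketch (editorial addition, not machine generated): reordering
// a 2x3 SparseTensor whose entries were appended out of order. Values are
// hypothetical.
//
//	s := NewScope()
//	indices := Const(s, [][]int64{{1, 2}, {0, 1}}) // not row-major
//	values := Const(s, []int32{7, 5})
//	shape := Const(s, []int64{2, 3})
//	// outIdx is [[0, 1], [1, 2]]; outVals is [5, 7].
//	outIdx, outVals := SparseReorder(s, indices, values, shape)
//	_, _ = outIdx, outVals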

// Generates fingerprint values.
//
// Generates fingerprint values of `data`.
//
// Fingerprint op considers the first dimension of `data` as the batch dimension,
// and `output[i]` contains the fingerprint value generated from contents in
// `data[i, ...]` for all `i`.
//
// Fingerprint op writes fingerprint values as byte arrays. For example, the
// default method `farmhash64` generates a 64-bit fingerprint value at a time.
// This 8-byte value is written out as a `uint8` array of size 8, in little-endian
// order.
//
// For example, suppose that `data` has data type `DT_INT32` and shape (2, 3, 4),
// and that the fingerprint method is `farmhash64`. In this case, the output shape
// is (2, 8), where 2 is the batch dimension size of `data`, and 8 is the size of
// each fingerprint value in bytes. `output[0, :]` is generated from 12 integers in
// `data[0, :, :]` and similarly `output[1, :]` is generated from the other 12
// integers in `data[1, :, :]`.
//
// Note that this op fingerprints the raw underlying buffer, and it does not
// fingerprint Tensor's metadata such as data type and/or shape. For example, the
// fingerprint values are invariant under reshapes and bitcasts as long as the
// batch dimension remains the same:
//
// ```
// Fingerprint(data) == Fingerprint(Reshape(data, ...))
// Fingerprint(data) == Fingerprint(Bitcast(data, ...))
// ```
//
// For string data, one should expect `Fingerprint(data) !=
// Fingerprint(ReduceJoin(data))` in general.
//
// Arguments:
//	data: Must have rank 1 or higher.
//	method: Fingerprint method used by this op. The currently available method is
// `farmhash::fingerprint64`.
//
// Returns A two-dimensional `Tensor` of type `tf.uint8`. The first dimension equals
// `data`'s first dimension, and the second dimension size depends on the
// fingerprint algorithm.
func Fingerprint(scope *Scope, data tf.Output, method tf.Output) (fingerprint tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Fingerprint",
		Input: []tf.Input{
			data, method,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
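
// Illustrative sketch (editorial addition, not machine generated): the shape
// arithmetic from the doc comment above, on a smaller input. Values are
// hypothetical.
//
//	s := NewScope()
//	data := Const(s, [][]int64{{1, 2, 3, 4}, {5, 6, 7, 8}}) // shape (2, 4)
//	fp := Fingerprint(s, data, Const(s, "farmhash64"))
//	_ = fp // shape (2, 8): one 8-byte fingerprint per batch row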

// CopyAttr is an optional argument to Copy.
type CopyAttr func(optionalAttr)

// CopyTensorName sets the optional tensor_name attribute to value.
//
// value: The name of the input tensor.
// If not specified, defaults to ""
func CopyTensorName(value string) CopyAttr {
	return func(m optionalAttr) {
		m["tensor_name"] = value
	}
}

// CopyDebugOpsSpec sets the optional debug_ops_spec attribute to value.
//
// value: A list of debug op spec (op, url, gated_grpc) for attached debug
// ops. Each element of the list has the format
// <debug_op>;<grpc_url>;<gated_grpc>, wherein gated_grpc is a boolean represented
// as 0/1. E.g., "DebugIdentity;grpc://foo:3333;1",
// "DebugIdentity;file:///tmp/tfdbg_1;0".
// If not specified, defaults to <>
func CopyDebugOpsSpec(value []string) CopyAttr {
	return func(m optionalAttr) {
		m["debug_ops_spec"] = value
	}
}

// Copy a tensor from CPU-to-CPU or GPU-to-GPU.
//
// Performs CPU-to-CPU or GPU-to-GPU deep-copying of a tensor, depending on the
// device on which the tensor is allocated.
// N.B.: If all the downstream attached debug ops are disabled given the current
// gRPC gating status, the output will simply forward the input tensor without
// deep-copying. See the documentation of Debug* ops for more details.
//
// Unlike the CopyHost Op, this op does not have a HostMemory constraint on its
// input or output.
//
// Arguments:
//	input: Input tensor.
func Copy(scope *Scope, input tf.Output, optional ...CopyAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "Copy",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Updates specified rows 'i' with values 'v'.
//
// Computes `x[i, :] = v; return x`.
//
// Originally this function was mutative; for compilation we instead make this
// operation create and operate on a copy of `x`.
//
// Arguments:
//	x: A tensor of type `T`.
//	i: A vector. Indices into the left-most dimension of `x`.
//	v: A `Tensor` of type T. Same dimension sizes as x except the first dimension, which must be the same as i's size.
//
// Returns A `Tensor` of type T. An alias of `x`. The content of `y` is undefined if there are duplicates in `i`.
func InplaceUpdate(scope *Scope, x tf.Output, i tf.Output, v tf.Output) (y tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "InplaceUpdate",
		Input: []tf.Input{
			x, i, v,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
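
// Illustrative sketch (editorial addition, not machine generated): overwriting
// row 1 of a 2x2 matrix, i.e. x[1, :] = v. Values are hypothetical.
//
//	s := NewScope()
//	x := Const(s, [][]int32{{1, 2}, {3, 4}})
//	i := Const(s, []int32{1})
//	v := Const(s, [][]int32{{9, 9}})
//	y := InplaceUpdate(s, x, i, v) // y is [[1, 2], [9, 9]]
//	_ = y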

// Table initializer that takes two tensors for keys and values respectively.
//
// Arguments:
//	table_handle: Handle to a table which will be initialized.
//	keys: Keys of type Tkey.
//	values: Values of type Tval.
//
// Returns the created operation.
func InitializeTableV2(scope *Scope, table_handle tf.Output, keys tf.Output, values tf.Output) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "InitializeTableV2",
		Input: []tf.Input{
			table_handle, keys, values,
		},
	}
	return scope.AddOperation(opspec)
}

// BatchToSpace for N-D tensors of type T.
//
// This operation reshapes the "batch" dimension 0 into `M + 1` dimensions of shape
// `block_shape + [batch]`, interleaves these blocks back into the grid defined by
// the spatial dimensions `[1, ..., M]`, to obtain a result with the same rank as
// the input.  The spatial dimensions of this intermediate result are then
// optionally cropped according to `crops` to produce the output.  This is the
// reverse of SpaceToBatch.  See below for a precise description.
//
// Arguments:
//	input: N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`,
// where spatial_shape has M dimensions.
//	block_shape: 1-D with shape `[M]`, all values must be >= 1.
//	crops: 2-D with shape `[M, 2]`, all values must be >= 0.
//   `crops[i] = [crop_start, crop_end]` specifies the amount to crop from input
//   dimension `i + 1`, which corresponds to spatial dimension `i`.  It is
//   required that
//   `crop_start[i] + crop_end[i] <= block_shape[i] * input_shape[i + 1]`.
//
// This operation is equivalent to the following steps:
//
// 1. Reshape `input` to `reshaped` of shape:
//      [block_shape[0], ..., block_shape[M-1],
//       batch / prod(block_shape),
//       input_shape[1], ..., input_shape[N-1]]
//
// 2. Permute dimensions of `reshaped` to produce `permuted` of shape
//      [batch / prod(block_shape),
//
//       input_shape[1], block_shape[0],
//       ...,
//       input_shape[M], block_shape[M-1],
//
//       input_shape[M+1], ..., input_shape[N-1]]
//
// 3. Reshape `permuted` to produce `reshaped_permuted` of shape
//      [batch / prod(block_shape),
//
//       input_shape[1] * block_shape[0],
//       ...,
//       input_shape[M] * block_shape[M-1],
//
//       input_shape[M+1],
//       ...,
//       input_shape[N-1]]
//
// 4. Crop the start and end of dimensions `[1, ..., M]` of
//    `reshaped_permuted` according to `crops` to produce the output of shape:
//      [batch / prod(block_shape),
//
//       input_shape[1] * block_shape[0] - crops[0,0] - crops[0,1],
//       ...,
//       input_shape[M] * block_shape[M-1] - crops[M-1,0] - crops[M-1,1],
//
//       input_shape[M+1], ..., input_shape[N-1]]
//
// Some examples:
//
// (1) For the following input of shape `[4, 1, 1, 1]`, `block_shape = [2, 2]`, and
//     `crops = [[0, 0], [0, 0]]`:
//
// ```
// [[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
// ```
//
// The output tensor has shape `[1, 2, 2, 1]` and value:
//
// ```
// x = [[[[1], [2]], [[3], [4]]]]
// ```
//
// (2) For the following input of shape `[4, 1, 1, 3]`, `block_shape = [2, 2]`, and
//     `crops = [[0, 0], [0, 0]]`:
//
// ```
// [[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]]
// ```
//
// The output tensor has shape `[1, 2, 2, 3]` and value:
//
// ```
// x = [[[[1, 2, 3], [4, 5, 6]],
//       [[7, 8, 9], [10, 11, 12]]]]
// ```
//
// (3) For the following input of shape `[4, 2, 2, 1]`, `block_shape = [2, 2]`, and
//     `crops = [[0, 0], [0, 0]]`:
//
// ```
// x = [[[[1], [3]], [[9], [11]]],
//      [[[2], [4]], [[10], [12]]],
//      [[[5], [7]], [[13], [15]]],
//      [[[6], [8]], [[14], [16]]]]
// ```
//
// The output tensor has shape `[1, 4, 4, 1]` and value:
//
// ```
// x = [[[[1],   [2],  [3],  [4]],
//       [[5],   [6],  [7],  [8]],
//       [[9],  [10], [11],  [12]],
//       [[13], [14], [15],  [16]]]]
// ```
//
// (4) For the following input of shape `[8, 1, 3, 1]`, `block_shape = [2, 2]`, and
//     `crops = [[0, 0], [2, 0]]`:
//
// ```
// x = [[[[0], [1], [3]]], [[[0], [9], [11]]],
//      [[[0], [2], [4]]], [[[0], [10], [12]]],
//      [[[0], [5], [7]]], [[[0], [13], [15]]],
//      [[[0], [6], [8]]], [[[0], [14], [16]]]]
// ```
//
// The output tensor has shape `[2, 2, 4, 1]` and value:
//
// ```
// x = [[[[1],   [2],  [3],  [4]],
//       [[5],   [6],  [7],  [8]]],
//      [[[9],  [10], [11],  [12]],
//       [[13], [14], [15],  [16]]]]
// ```
func BatchToSpaceND(scope *Scope, input tf.Output, block_shape tf.Output, crops tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "BatchToSpaceND",
		Input: []tf.Input{
			input, block_shape, crops,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// FIFOQueueV2Attr is an optional argument to FIFOQueueV2.
type FIFOQueueV2Attr func(optionalAttr)

// FIFOQueueV2Shapes sets the optional shapes attribute to value.
//
// value: The shape of each component in a value. The length of this attr must
// be either 0 or the same as the length of component_types. If the length of
// this attr is 0, the shapes of queue elements are not constrained, and
// only one element may be dequeued at a time.
// If not specified, defaults to <>
//
// REQUIRES: len(value) >= 0
func FIFOQueueV2Shapes(value []tf.Shape) FIFOQueueV2Attr {
	return func(m optionalAttr) {
		m["shapes"] = value
	}
}

// FIFOQueueV2Capacity sets the optional capacity attribute to value.
//
// value: The upper bound on the number of elements in this queue.
// Negative numbers mean no limit.
// If not specified, defaults to -1
func FIFOQueueV2Capacity(value int64) FIFOQueueV2Attr {
	return func(m optionalAttr) {
		m["capacity"] = value
	}
}

// FIFOQueueV2Container sets the optional container attribute to value.
//
// value: If non-empty, this queue is placed in the given container.
// Otherwise, a default container is used.
// If not specified, defaults to ""
func FIFOQueueV2Container(value string) FIFOQueueV2Attr {
	return func(m optionalAttr) {
		m["container"] = value
	}
}

// FIFOQueueV2SharedName sets the optional shared_name attribute to value.
//
// value: If non-empty, this queue will be shared under the given name
// across multiple sessions.
// If not specified, defaults to ""
func FIFOQueueV2SharedName(value string) FIFOQueueV2Attr {
	return func(m optionalAttr) {
		m["shared_name"] = value
	}
}

// A queue that produces elements in first-in first-out order.
//
// Arguments:
//	component_types: The type of each component in a value.
//
// Returns The handle to the queue.
func FIFOQueueV2(scope *Scope, component_types []tf.DataType, optional ...FIFOQueueV2Attr) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"component_types": component_types}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "FIFOQueueV2",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
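
// Illustrative sketch (editorial addition, not machine generated): the
// FIFOQueueV2*Attr helpers follow the functional-options pattern used
// throughout this package, so optional attributes are passed as trailing
// arguments. Values here are hypothetical.
//
//	s := NewScope()
//	q := FIFOQueueV2(s, []tf.DataType{tf.Float},
//		FIFOQueueV2Capacity(100),
//		FIFOQueueV2SharedName("shared_fifo"))
//	_ = q // handle to the queue resource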

// Quantized Batch normalization.
//
// This op is deprecated and will be removed in the future. Prefer
// `tf.nn.batch_normalization`.
//
// Arguments:
//	t: A 4D input Tensor.
//	t_min: The value represented by the lowest quantized input.
//	t_max: The value represented by the highest quantized input.
//	m: A 1D mean Tensor with size matching the last dimension of t.
// This is the first output from tf.nn.moments,
// or a saved moving average thereof.
//	m_min: The value represented by the lowest quantized mean.
//	m_max: The value represented by the highest quantized mean.
//	v: A 1D variance Tensor with size matching the last dimension of t.
// This is the second output from tf.nn.moments,
// or a saved moving average thereof.
//	v_min: The value represented by the lowest quantized variance.
//	v_max: The value represented by the highest quantized variance.
//	beta: A 1D beta Tensor with size matching the last dimension of t.
// An offset to be added to the normalized tensor.
//	beta_min: The value represented by the lowest quantized offset.
//	beta_max: The value represented by the highest quantized offset.
//	gamma: A 1D gamma Tensor with size matching the last dimension of t.
// If "scale_after_normalization" is true, this tensor will be multiplied
// with the normalized tensor.
//	gamma_min: The value represented by the lowest quantized gamma.
//	gamma_max: The value represented by the highest quantized gamma.
//
//	variance_epsilon: A small float number to avoid dividing by 0.
//	scale_after_normalization: A bool indicating whether the resulting tensor
// needs to be multiplied with gamma.
func QuantizedBatchNormWithGlobalNormalization(scope *Scope, t tf.Output, t_min tf.Output, t_max tf.Output, m tf.Output, m_min tf.Output, m_max tf.Output, v tf.Output, v_min tf.Output, v_max tf.Output, beta tf.Output, beta_min tf.Output, beta_max tf.Output, gamma tf.Output, gamma_min tf.Output, gamma_max tf.Output, out_type tf.DataType, variance_epsilon float32, scale_after_normalization bool) (result tf.Output, result_min tf.Output, result_max tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"out_type": out_type, "variance_epsilon": variance_epsilon, "scale_after_normalization": scale_after_normalization}
	opspec := tf.OpSpec{
		Type: "QuantizedBatchNormWithGlobalNormalization",
		Input: []tf.Input{
			t, t_min, t_max, m, m_min, m_max, v, v_min, v_max, beta, beta_min, beta_max, gamma, gamma_min, gamma_max,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}

// ResourceStridedSliceAssignAttr is an optional argument to ResourceStridedSliceAssign.
type ResourceStridedSliceAssignAttr func(optionalAttr)

// ResourceStridedSliceAssignBeginMask sets the optional begin_mask attribute to value.
// If not specified, defaults to 0
func ResourceStridedSliceAssignBeginMask(value int64) ResourceStridedSliceAssignAttr {
	return func(m optionalAttr) {
		m["begin_mask"] = value
	}
}

// ResourceStridedSliceAssignEndMask sets the optional end_mask attribute to value.
// If not specified, defaults to 0
func ResourceStridedSliceAssignEndMask(value int64) ResourceStridedSliceAssignAttr {
	return func(m optionalAttr) {
		m["end_mask"] = value
	}
}

// ResourceStridedSliceAssignEllipsisMask sets the optional ellipsis_mask attribute to value.
// If not specified, defaults to 0
func ResourceStridedSliceAssignEllipsisMask(value int64) ResourceStridedSliceAssignAttr {
	return func(m optionalAttr) {
		m["ellipsis_mask"] = value
	}
}

// ResourceStridedSliceAssignNewAxisMask sets the optional new_axis_mask attribute to value.
// If not specified, defaults to 0
func ResourceStridedSliceAssignNewAxisMask(value int64) ResourceStridedSliceAssignAttr {
	return func(m optionalAttr) {
		m["new_axis_mask"] = value
	}
}

// ResourceStridedSliceAssignShrinkAxisMask sets the optional shrink_axis_mask attribute to value.
// If not specified, defaults to 0
func ResourceStridedSliceAssignShrinkAxisMask(value int64) ResourceStridedSliceAssignAttr {
	return func(m optionalAttr) {
		m["shrink_axis_mask"] = value
	}
}

// Assign `value` to the sliced l-value reference of `ref`.
//
// The values of `value` are assigned to the positions in the variable
// `ref` that are selected by the slice parameters. The slice parameters
// `begin`, `end`, `strides`, etc. work exactly as in `StridedSlice`.
//
// NOTE this op currently does not support broadcasting and so `value`'s
// shape must be exactly the shape produced by the slice of `ref`.
//
// Returns the created operation.
func ResourceStridedSliceAssign(scope *Scope, ref tf.Output, begin tf.Output, end tf.Output, strides tf.Output, value tf.Output, optional ...ResourceStridedSliceAssignAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ResourceStridedSliceAssign",
		Input: []tf.Input{
			ref, begin, end, strides, value,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}

// QuantizedRelu6Attr is an optional argument to QuantizedRelu6.
type QuantizedRelu6Attr func(optionalAttr)

// QuantizedRelu6OutType sets the optional out_type attribute to value.
// If not specified, defaults to DT_QUINT8
func QuantizedRelu6OutType(value tf.DataType) QuantizedRelu6Attr {
	return func(m optionalAttr) {
		m["out_type"] = value
	}
}

// Computes Quantized Rectified Linear 6: `min(max(features, 0), 6)`
//
// Arguments:
//
//	min_features: The float value that the lowest quantized value represents.
//	max_features: The float value that the highest quantized value represents.
//
// Returns:
//	activations: Has the same output shape as "features".
//	min_activations: The float value that the lowest quantized value represents.
//	max_activations: The float value that the highest quantized value represents.
func QuantizedRelu6(scope *Scope, features tf.Output, min_features tf.Output, max_features tf.Output, optional ...QuantizedRelu6Attr) (activations tf.Output, min_activations tf.Output, max_activations tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "QuantizedRelu6",
		Input: []tf.Input{
			features, min_features, max_features,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}

// DataFormatVecPermuteAttr is an optional argument to DataFormatVecPermute.
type DataFormatVecPermuteAttr func(optionalAttr)

// DataFormatVecPermuteSrcFormat sets the optional src_format attribute to value.
//
// value: source data format.
// If not specified, defaults to "NHWC"
func DataFormatVecPermuteSrcFormat(value string) DataFormatVecPermuteAttr {
	return func(m optionalAttr) {
		m["src_format"] = value
	}
}

// DataFormatVecPermuteDstFormat sets the optional dst_format attribute to value.
//
// value: destination data format.
// If not specified, defaults to "NCHW"
func DataFormatVecPermuteDstFormat(value string) DataFormatVecPermuteAttr {
	return func(m optionalAttr) {
		m["dst_format"] = value
	}
}

// Permute input tensor from `src_format` to `dst_format`.
//
// Input tensor must be a vector of size 4, or a 4x2 tensor.
//
// For example, with `src_format` of `NHWC`, `dst_format` of `NCHW`, and inputs:
// ```
// [1, 2, 3, 4]
// ```
// and
// ```
// [[1, 2, 3, 4],
//  [5, 6, 7, 8]]
// ```
// the outputs will be (respectively):
// ```
// [1, 4, 2, 3]
// ```
// and
// ```
// [[1, 4, 2, 3],
//  [5, 8, 6, 7]]
// ```
//
// Arguments:
//	x: Vector of size 4 or Tensor of shape (4, 2) in source data format.
//
// Returns Vector of size 4 or Tensor of shape (4, 2) in destination data format.
func DataFormatVecPermute(scope *Scope, x tf.Output, optional ...DataFormatVecPermuteAttr) (y tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "DataFormatVecPermute",
		Input: []tf.Input{
			x,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Adds Tensor 'bias' to Tensor 'input' for Quantized types.
//
// Broadcasts the values of bias on dimensions 0..N-2 of 'input'.
//
// Arguments:
//
//	bias: A 1D bias Tensor with size matching the last dimension of 'input'.
//	min_input: The float value that the lowest quantized input value represents.
//	max_input: The float value that the highest quantized input value represents.
//	min_bias: The float value that the lowest quantized bias value represents.
//	max_bias: The float value that the highest quantized bias value represents.
//
//
// Returns:
//	output
//	min_out: The float value that the lowest quantized output value represents.
//	max_out: The float value that the highest quantized output value represents.
func QuantizedBiasAdd(scope *Scope, input tf.Output, bias tf.Output, min_input tf.Output, max_input tf.Output, min_bias tf.Output, max_bias tf.Output, out_type tf.DataType) (output tf.Output, min_out tf.Output, max_out tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"out_type": out_type}
	opspec := tf.OpSpec{
		Type: "QuantizedBiasAdd",
		Input: []tf.Input{
			input, bias, min_input, max_input, min_bias, max_bias,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}

// MutableDenseHashTableV2Attr is an optional argument to MutableDenseHashTableV2.
type MutableDenseHashTableV2Attr func(optionalAttr)

// MutableDenseHashTableV2Container sets the optional container attribute to value.
//
// value: If non-empty, this table is placed in the given container.
// Otherwise, a default container is used.
// If not specified, defaults to ""
func MutableDenseHashTableV2Container(value string) MutableDenseHashTableV2Attr {
	return func(m optionalAttr) {
		m["container"] = value
	}
}

// MutableDenseHashTableV2SharedName sets the optional shared_name attribute to value.
//
// value: If non-empty, this table is shared under the given name across
// multiple sessions.
// If not specified, defaults to ""
func MutableDenseHashTableV2SharedName(value string) MutableDenseHashTableV2Attr {
	return func(m optionalAttr) {
		m["shared_name"] = value
	}
}

// MutableDenseHashTableV2UseNodeNameSharing sets the optional use_node_name_sharing attribute to value.
// If not specified, defaults to false
func MutableDenseHashTableV2UseNodeNameSharing(value bool) MutableDenseHashTableV2Attr {
	return func(m optionalAttr) {
		m["use_node_name_sharing"] = value
	}
}

// MutableDenseHashTableV2ValueShape sets the optional value_shape attribute to value.
//
// value: The shape of each value.
// If not specified, defaults to <>
func MutableDenseHashTableV2ValueShape(value tf.Shape) MutableDenseHashTableV2Attr {
	return func(m optionalAttr) {
		m["value_shape"] = value
	}
}

// MutableDenseHashTableV2InitialNumBuckets sets the optional initial_num_buckets attribute to value.
//
// value: The initial number of hash table buckets. Must be a power
// of 2.
// If not specified, defaults to 131072
func MutableDenseHashTableV2InitialNumBuckets(value int64) MutableDenseHashTableV2Attr {
	return func(m optionalAttr) {
		m["initial_num_buckets"] = value
	}
}

// MutableDenseHashTableV2MaxLoadFactor sets the optional max_load_factor attribute to value.
//
// value: The maximum ratio between number of entries and number of
// buckets before growing the table. Must be between 0 and 1.
// If not specified, defaults to 0.8
func MutableDenseHashTableV2MaxLoadFactor(value float32) MutableDenseHashTableV2Attr {
	return func(m optionalAttr) {
		m["max_load_factor"] = value
	}
}

// Creates an empty hash table that uses tensors as the backing store.
//
// It uses "open addressing" with quadratic reprobing to resolve
// collisions.
//
// This op creates a mutable hash table, specifying the type of its keys and
// values. Each value must be a scalar. Data can be inserted into the table using
// the insert operations. It does not support the initialization operation.
//
// Arguments:
//	empty_key: The key used to represent empty key buckets internally. Must not
// be used in insert or lookup operations.
//	deleted_key: The key used to represent deleted key buckets internally. Must not
// be used in insert or lookup operations.
//	value_dtype: Type of the table values.
//
// Returns Handle to a table.
func MutableDenseHashTableV2(scope *Scope, empty_key tf.Output, deleted_key tf.Output, value_dtype tf.DataType, optional ...MutableDenseHashTableV2Attr) (table_handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"value_dtype": value_dtype}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "MutableDenseHashTableV2",
		Input: []tf.Input{
			empty_key, deleted_key,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
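
// Illustrative sketch (editorial addition, not machine generated): creating a
// dense hash table keyed by int64 with float values. The sentinel keys below
// are hypothetical; they must simply never collide with real keys.
//
//	s := NewScope()
//	emptyKey := Const(s, int64(-1))
//	deletedKey := Const(s, int64(-2))
//	table := MutableDenseHashTableV2(s, emptyKey, deletedKey, tf.Float,
//		MutableDenseHashTableV2InitialNumBuckets(1024)) // must be a power of 2
//	_ = table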

// FractionalAvgPoolGradAttr is an optional argument to FractionalAvgPoolGrad.
type FractionalAvgPoolGradAttr func(optionalAttr)

// FractionalAvgPoolGradOverlapping sets the optional overlapping attribute to value.
//
// value: When set to True, it means when pooling, the values at the boundary
// of adjacent pooling cells are used by both cells. For example:
//
// `index  0  1  2  3  4`
//
// `value  20 5  16 3  7`
//
// If the pooling sequence is [0, 2, 4], then 16, at index 2, will be used twice.
// The result would be [41/3, 26/3] for fractional avg pooling.
// If not specified, defaults to false
func FractionalAvgPoolGradOverlapping(value bool) FractionalAvgPoolGradAttr {
	return func(m optionalAttr) {
		m["overlapping"] = value
	}
}

// Computes gradient of the FractionalAvgPool function.
//
// Unlike FractionalMaxPoolGrad, we don't need to find arg_max for
// FractionalAvgPoolGrad; we just need to evenly back-propagate each element of
// out_backprop to those indices that form the same pooling cell. Therefore, we
// just need to know the shape of the original input tensor, instead of the whole
// tensor.
//
// Arguments:
//	orig_input_tensor_shape: Original input tensor shape for `fractional_avg_pool`
//	out_backprop: 4-D with shape `[batch, height, width, channels]`.  Gradients
// w.r.t. the output of `fractional_avg_pool`.
//	row_pooling_sequence: row pooling sequence, form pooling region with
// col_pooling_sequence.
//	col_pooling_sequence: column pooling sequence, form pooling region with
// row_pooling sequence.
//
// Returns 4-D.  Gradients w.r.t. the input of `fractional_avg_pool`.
func FractionalAvgPoolGrad(scope *Scope, orig_input_tensor_shape tf.Output, out_backprop tf.Output, row_pooling_sequence tf.Output, col_pooling_sequence tf.Output, optional ...FractionalAvgPoolGradAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "FractionalAvgPoolGrad",
		Input: []tf.Input{
			orig_input_tensor_shape, out_backprop, row_pooling_sequence, col_pooling_sequence,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// FractionalMaxPoolGradAttr is an optional argument to FractionalMaxPoolGrad.
type FractionalMaxPoolGradAttr func(optionalAttr)

// FractionalMaxPoolGradOverlapping sets the optional overlapping attribute to value.
//
// value: When set to True, it means when pooling, the values at the boundary
// of adjacent pooling cells are used by both cells. For example:
//
// `index  0  1  2  3  4`
//
// `value  20 5  16 3  7`
//
// If the pooling sequence is [0, 2, 4], then 16, at index 2, will be used twice.
// The result would be [20, 16] for fractional max pooling.
// If not specified, defaults to false
func FractionalMaxPoolGradOverlapping(value bool) FractionalMaxPoolGradAttr {
	return func(m optionalAttr) {
		m["overlapping"] = value
	}
}

// Computes gradient of the FractionalMaxPool function.
//
// Arguments:
//	orig_input: Original input for `fractional_max_pool`
//	orig_output: Original output for `fractional_max_pool`
//	out_backprop: 4-D with shape `[batch, height, width, channels]`.  Gradients
// w.r.t. the output of `fractional_max_pool`.
//	row_pooling_sequence: row pooling sequence, form pooling region with
// col_pooling_sequence.
//	col_pooling_sequence: column pooling sequence, form pooling region with
// row_pooling sequence.
//
// Returns 4-D.  Gradients w.r.t. the input of `fractional_max_pool`.
func FractionalMaxPoolGrad(scope *Scope, orig_input tf.Output, orig_output tf.Output, out_backprop tf.Output, row_pooling_sequence tf.Output, col_pooling_sequence tf.Output, optional ...FractionalMaxPoolGradAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "FractionalMaxPoolGrad",
		Input: []tf.Input{
			orig_input, orig_output, out_backprop, row_pooling_sequence, col_pooling_sequence,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Wraps the XLA ConvGeneralDilated operator, documented at
//
//  https://www.tensorflow.org/performance/xla/operation_semantics#conv_convolution
// .
//
// Arguments:
//	lhs: the input tensor
//	rhs: the kernel tensor
//	window_strides: the inter-window strides
//	padding: the padding to apply at the start and end of each input dimension
//	lhs_dilation: dilation to apply between input elements
//	rhs_dilation: dilation to apply between kernel elements
//	feature_group_count: number of feature groups for grouped convolution.
//	dimension_numbers: a serialized xla::ConvolutionDimensionNumbers proto.
//	precision_config: a serialized xla::PrecisionConfig proto.
func XlaConv(scope *Scope, lhs tf.Output, rhs tf.Output, window_strides tf.Output, padding tf.Output, lhs_dilation tf.Output, rhs_dilation tf.Output, feature_group_count tf.Output, dimension_numbers string, precision_config string) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"dimension_numbers": dimension_numbers, "precision_config": precision_config}
	opspec := tf.OpSpec{
		Type: "XlaConv",
		Input: []tf.Input{
			lhs, rhs, window_strides, padding, lhs_dilation, rhs_dilation, feature_group_count,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// NthElementAttr is an optional argument to NthElement.
type NthElementAttr func(optionalAttr)

// NthElementReverse sets the optional reverse attribute to value.
//
// value: When set to True, find the nth-largest value in the vector and vice
// versa.
// If not specified, defaults to false
func NthElementReverse(value bool) NthElementAttr {
	return func(m optionalAttr) {
		m["reverse"] = value
	}
}

// Finds values of the `n`-th order statistic for the last dimension.
//
// If the input is a vector (rank-1), finds the entry which is the n-th smallest
// value in the vector and outputs its value as a scalar tensor.
//
// For matrices (resp. higher rank input), computes the entry which is the
// n-th smallest value in each row (resp. vector along the last dimension). Thus,
//
//     values.shape = input.shape[:-1]
//
// Arguments:
//	input: 1-D or higher with last dimension at least `n+1`.
//	n: 0-D. Position of sorted vector to select along the last dimension (along
// each row for matrices). Valid range of n is `[0, input.shape[-1])`.
//
// Returns The `n`-th order statistic along each last dimensional slice.
func NthElement(scope *Scope, input tf.Output, n tf.Output, optional ...NthElementAttr) (values tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "NthElement",
		Input: []tf.Input{
			input, n,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
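
// Illustrative sketch (editorial addition, not machine generated): selecting
// the 2nd-smallest entry (n = 1, zero-based) of a vector. Values are
// hypothetical.
//
//	s := NewScope()
//	v := Const(s, []float32{5, 1, 4, 2})
//	nth := NthElement(s, v, Const(s, int32(1))) // scalar 2
//	// With NthElementReverse(true) the same call selects the 2nd-largest: 4.
//	_ = nth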

// Pads a tensor.
//
// This operation pads `input` according to the `paddings` and `constant_values`
// you specify. `paddings` is an integer tensor with shape `[Dn, 2]`, where n is
// the rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates
// how many padding values to add before the contents of `input` in that dimension,
// and `paddings[D, 1]` indicates how many padding values to add after the contents
// of `input` in that dimension. `constant_values` is a scalar tensor of the same
// type as `input` that indicates the value to use for padding `input`.
//
// The padded size of each dimension D of the output is:
//
// `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)`
//
// For example:
//
// ```
// # 't' is [[1, 1], [2, 2]]
// # 'paddings' is [[1, 1], [2, 2]]
// # 'constant_values' is 0
// # rank of 't' is 2
// pad(t, paddings) ==> [[0, 0, 0, 0, 0, 0],
//                       [0, 0, 1, 1, 0, 0],
//                       [0, 0, 2, 2, 0, 0],
//                       [0, 0, 0, 0, 0, 0]]
// ```
func PadV2(scope *Scope, input tf.Output, paddings tf.Output, constant_values tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "PadV2",
		Input: []tf.Input{
			input, paddings, constant_values,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Computes cos of x element-wise.
//
//   Given an input tensor, this function computes cosine of every
//   element in the tensor. Input range is `(-inf, inf)` and
//   output range is `[-1,1]`. If input lies outside the boundary, `nan`
//   is returned.
//
//   ```python
//   x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 200, 10000, float("inf")])
//   tf.math.cos(x) ==> [nan -0.91113025 0.87758255 0.5403023 0.36235774 0.48718765 -0.95215535 nan]
//   ```
func Cos(scope *Scope, x tf.Output) (y tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Cos",
		Input: []tf.Input{
			x,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// TopKV2Attr is an optional argument to TopKV2.
type TopKV2Attr func(optionalAttr)

// TopKV2Sorted sets the optional sorted attribute to value.
//
// value: If true the resulting `k` elements will be sorted by the values in
// descending order.
// If not specified, defaults to true
func TopKV2Sorted(value bool) TopKV2Attr {
	return func(m optionalAttr) {
		m["sorted"] = value
	}
}

// Finds values and indices of the `k` largest elements for the last dimension.
//
// If the input is a vector (rank-1), finds the `k` largest entries in the vector
// and outputs their values and indices as vectors.  Thus `values[j]` is the
// `j`-th largest entry in `input`, and its index is `indices[j]`.
//
// For matrices (resp. higher rank input), computes the top `k` entries in each
// row (resp. vector along the last dimension).  Thus,
//
//     values.shape = indices.shape = input.shape[:-1] + [k]
//
// If two elements are equal, the lower-index element appears first.
//
// Arguments:
//	input: 1-D or higher with last dimension at least `k`.
//	k: 0-D.  Number of top elements to look for along the last dimension (along each
// row for matrices).
//
// Returns:
//	values: The `k` largest elements along each last dimensional slice.
//	indices: The indices of `values` within the last dimension of `input`.
func TopKV2(scope *Scope, input tf.Output, k tf.Output, optional ...TopKV2Attr) (values tf.Output, indices tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "TopKV2",
		Input: []tf.Input{
			input, k,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1)
}
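
// Illustrative sketch (editorial addition, not machine generated): top-2
// values and indices of a vector, with the default descending sort. Values
// are hypothetical.
//
//	s := NewScope()
//	x := Const(s, []float32{1, 4, 2, 9})
//	vals, idxs := TopKV2(s, x, Const(s, int32(2)))
//	// vals is [9, 4]; idxs is [3, 1].
//	_, _ = vals, idxs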

// TopKAttr is an optional argument to TopK.
type TopKAttr func(optionalAttr)

// TopKSorted sets the optional sorted attribute to value.
//
// value: If true the resulting `k` elements will be sorted by the values in
// descending order.
// If not specified, defaults to true
func TopKSorted(value bool) TopKAttr {
	return func(m optionalAttr) {
		m["sorted"] = value
	}
}

// Finds values and indices of the `k` largest elements for the last dimension.
//
// DEPRECATED at GraphDef version 7: Use TopKV2 instead
//
// If the input is a vector (rank-1), finds the `k` largest entries in the vector
// and outputs their values and indices as vectors.  Thus `values[j]` is the
// `j`-th largest entry in `input`, and its index is `indices[j]`.
//
// For matrices (resp. higher rank input), computes the top `k` entries in each
// row (resp. vector along the last dimension).  Thus,
//
//     values.shape = indices.shape = input.shape[:-1] + [k]
//
// If two elements are equal, the lower-index element appears first.
//
// If `k` varies dynamically, use `TopKV2` instead.
//
// Arguments:
//	input: 1-D or higher with last dimension at least `k`.
//	k: Number of top elements to look for along the last dimension (along each
// row for matrices).
//
// Returns:
//	values: The `k` largest elements along each last dimensional slice.
//	indices: The indices of `values` within the last dimension of `input`.
func TopK(scope *Scope, input tf.Output, k int64, optional ...TopKAttr) (values tf.Output, indices tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"k": k}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "TopK",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1)
}

// Outputs the single element from the given dataset.
//
// Arguments:
//	dataset: A handle to a dataset that contains a single element.
//
//
//
// Returns The components of the single element of `input`.
func DatasetToSingleElement(scope *Scope, dataset tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (components []tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
	opspec := tf.OpSpec{
		Type: "DatasetToSingleElement",
		Input: []tf.Input{
			dataset,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	if scope.Err() != nil {
		return
	}
	var idx int
	var err error
	if components, idx, err = makeOutputList(op, idx, "components"); err != nil {
		scope.UpdateErr("DatasetToSingleElement", err)
		return
	}
	return components
}

// Computes softmax cross entropy cost and gradients to backpropagate.
//
// Inputs are the logits, not probabilities.
//
// Arguments:
//	features: batch_size x num_classes matrix
//	labels: batch_size x num_classes matrix
// The caller must ensure that each batch of labels represents a valid
// probability distribution.
//
// Returns:
//	loss: Per example loss (batch_size vector).
//	backprop: backpropagated gradients (batch_size x num_classes matrix).
func SoftmaxCrossEntropyWithLogits(scope *Scope, features tf.Output, labels tf.Output) (loss tf.Output, backprop tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "SoftmaxCrossEntropyWithLogits",
		Input: []tf.Input{
			features, labels,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1)
}

// Computes log softmax activations.
//
// For each batch `i` and class `j` we have
//
//     logsoftmax[i, j] = logits[i, j] - log(sum(exp(logits[i])))
//
// Arguments:
//	logits: 2-D with shape `[batch_size, num_classes]`.
//
// Returns Same shape as `logits`.
func LogSoftmax(scope *Scope, logits tf.Output) (logsoftmax tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "LogSoftmax",
		Input: []tf.Input{
			logits,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Computes softmax activations.
//
// For each batch `i` and class `j` we have
//
//     $$softmax[i, j] = exp(logits[i, j]) / sum_j(exp(logits[i, j]))$$
//
// Arguments:
//	logits: 2-D with shape `[batch_size, num_classes]`.
//
// Returns Same shape as `logits`.
func Softmax(scope *Scope, logits tf.Output) (softmax tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Softmax",
		Input: []tf.Input{
			logits,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Computes softsign gradients for a softsign operation.
//
// Arguments:
//	gradients: The backpropagated gradients to the corresponding softsign operation.
//	features: The features passed as input to the corresponding softsign operation.
//
// Returns The gradients: `gradients / (1 + abs(features)) ** 2`.
func SoftsignGrad(scope *Scope, gradients tf.Output, features tf.Output) (backprops tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "SoftsignGrad",
		Input: []tf.Input{
			gradients, features,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Resizes the list.
//
//
// input_handle: the input list
// size: size of the output list
//
func TensorListResize(scope *Scope, input_handle tf.Output, size tf.Output) (output_handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "TensorListResize",
		Input: []tf.Input{
			input_handle, size,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// FusedBatchNormAttr is an optional argument to FusedBatchNorm.
type FusedBatchNormAttr func(optionalAttr)

// FusedBatchNormEpsilon sets the optional epsilon attribute to value.
//
// value: A small float number added to the variance of x.
// If not specified, defaults to 0.0001
func FusedBatchNormEpsilon(value float32) FusedBatchNormAttr {
	return func(m optionalAttr) {
		m["epsilon"] = value
	}
}

// FusedBatchNormExponentialAvgFactor sets the optional exponential_avg_factor attribute to value.
// If not specified, defaults to 1
func FusedBatchNormExponentialAvgFactor(value float32) FusedBatchNormAttr {
	return func(m optionalAttr) {
		m["exponential_avg_factor"] = value
	}
}

// FusedBatchNormDataFormat sets the optional data_format attribute to value.
//
// value: The data format for x and y. Either "NHWC" (default) or "NCHW".
// If not specified, defaults to "NHWC"
func FusedBatchNormDataFormat(value string) FusedBatchNormAttr {
	return func(m optionalAttr) {
		m["data_format"] = value
	}
}

// FusedBatchNormIsTraining sets the optional is_training attribute to value.
//
// value: A bool value to indicate the operation is for training (default)
// or inference.
// If not specified, defaults to true
func FusedBatchNormIsTraining(value bool) FusedBatchNormAttr {
	return func(m optionalAttr) {
		m["is_training"] = value
	}
}

// Batch normalization.
//
// Note that the size of 4D Tensors is defined by either "NHWC" or "NCHW".
// The size of 1D Tensors matches the dimension C of the 4D Tensors.
//
// Arguments:
//	x: A 4D Tensor for input data.
//	scale: A 1D Tensor for scaling factor, to scale the normalized x.
//	offset: A 1D Tensor for offset, to shift to the normalized x.
//	mean: A 1D Tensor for population mean. Used for inference only;
// must be empty for training.
//	variance: A 1D Tensor for population variance. Used for inference only;
// must be empty for training.
//
// Returns:
//	y: A 4D Tensor for output data.
//	batch_mean: A 1D Tensor for the computed batch mean, to be used by TensorFlow
// to compute the running mean.
//	batch_variance: A 1D Tensor for the computed batch variance, to be used by
// TensorFlow to compute the running variance.
//	reserve_space_1: A 1D Tensor for the computed batch mean, to be reused
// in the gradient computation.
//	reserve_space_2: A 1D Tensor for the computed batch variance (inverted variance
// in the cuDNN case), to be reused in the gradient computation.
func FusedBatchNorm(scope *Scope, x tf.Output, scale tf.Output, offset tf.Output, mean tf.Output, variance tf.Output, optional ...FusedBatchNormAttr) (y tf.Output, batch_mean tf.Output, batch_variance tf.Output, reserve_space_1 tf.Output, reserve_space_2 tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "FusedBatchNorm",
		Input: []tf.Input{
			x, scale, offset, mean, variance,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2), op.Output(3), op.Output(4)
}
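
// Illustrative sketch (editorial addition, not machine generated): inference-
// mode batch norm over an NHWC tensor using precomputed population statistics.
// Shapes and values are hypothetical.
//
//	s := NewScope()
//	x := Placeholder(s, tf.Float, PlaceholderShape(tf.MakeShape(1, 4, 4, 3)))
//	scale := Const(s, []float32{1, 1, 1})
//	offset := Const(s, []float32{0, 0, 0})
//	mean := Const(s, []float32{0.5, 0.5, 0.5})
//	variance := Const(s, []float32{1, 1, 1})
//	y, _, _, _, _ := FusedBatchNorm(s, x, scale, offset, mean, variance,
//		FusedBatchNormIsTraining(false),
//		FusedBatchNormDataFormat("NHWC"))
//	_ = y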

// SparseMatMulAttr is an optional argument to SparseMatMul.
type SparseMatMulAttr func(optionalAttr)

// SparseMatMulTransposeA sets the optional transpose_a attribute to value.
// If not specified, defaults to false
func SparseMatMulTransposeA(value bool) SparseMatMulAttr {
	return func(m optionalAttr) {
		m["transpose_a"] = value
	}
}

// SparseMatMulTransposeB sets the optional transpose_b attribute to value.
// If not specified, defaults to false
func SparseMatMulTransposeB(value bool) SparseMatMulAttr {
	return func(m optionalAttr) {
		m["transpose_b"] = value
	}
}

// SparseMatMulAIsSparse sets the optional a_is_sparse attribute to value.
// If not specified, defaults to false
func SparseMatMulAIsSparse(value bool) SparseMatMulAttr {
	return func(m optionalAttr) {
		m["a_is_sparse"] = value
	}
}

// SparseMatMulBIsSparse sets the optional b_is_sparse attribute to value.
// If not specified, defaults to false
func SparseMatMulBIsSparse(value bool) SparseMatMulAttr {
	return func(m optionalAttr) {
		m["b_is_sparse"] = value
	}
}

// Multiply matrix "a" by matrix "b".
//
// The inputs must be two-dimensional matrices and the inner dimension of "a" must
// match the outer dimension of "b". Both "a" and "b" must be `Tensor`s, not
// `SparseTensor`s.  This op is optimized for the case where at least one of "a" or
// "b" is sparse, in the sense that they have a large proportion of zero values.
// The breakeven for using this versus a dense matrix multiply on one platform was
// 30% zero values in the sparse matrix.
//
// The gradient computation of this operation will only take advantage of sparsity
// in the input gradient when that gradient comes from a Relu.
func SparseMatMul(scope *Scope, a tf.Output, b tf.Output, optional ...SparseMatMulAttr) (product tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "SparseMatMul",
		Input: []tf.Input{
			a, b,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
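
// Illustrative sketch (editorial addition, not machine generated): multiplying
// a mostly-zero matrix by a dense one, hinting that "a" is sparse. Values are
// hypothetical.
//
//	s := NewScope()
//	a := Const(s, [][]float32{{0, 0, 3}, {0, 0, 0}})
//	b := Const(s, [][]float32{{1, 2}, {3, 4}, {5, 6}})
//	p := SparseMatMul(s, a, b, SparseMatMulAIsSparse(true)) // 2x2 product
//	_ = p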

// Computes the LSTM cell backward propagation for 1 timestep.
//
// This implementation is to be used in conjunction with LSTMBlockCell.
//
// Arguments:
//	x: The input to the LSTM cell, shape (batch_size, num_inputs).
//	cs_prev: The previous cell state.
//	h_prev: The previous h state.
//	w: The weight matrix.
//	wci: The weight matrix for input gate peephole connection.
//	wcf: The weight matrix for forget gate peephole connection.
//	wco: The weight matrix for output gate peephole connection.
//	b: The bias vector.
//	i: The input gate.
//	cs: The cell state before the tanh.
//	f: The forget gate.
//	o: The output gate.
//	ci: The cell input.
//	co: The cell after the tanh.
//	cs_grad: The current gradient of cs.
//	h_grad: The gradient of h vector.
//	use_peephole: Whether the cell uses peephole connections.
//
// Returns:
//	cs_prev_grad: The gradient of cs to be back-propped.
//	dicfo: The derivative with respect to [i, cs, f, o].
//	wci_grad: The gradient for wci to be back-propped.
//	wcf_grad: The gradient for wcf to be back-propped.
//	wco_grad: The gradient for wco to be back-propped.
func LSTMBlockCellGrad(scope *Scope, x tf.Output, cs_prev tf.Output, h_prev tf.Output, w tf.Output, wci tf.Output, wcf tf.Output, wco tf.Output, b tf.Output, i tf.Output, cs tf.Output, f tf.Output, o tf.Output, ci tf.Output, co tf.Output, cs_grad tf.Output, h_grad tf.Output, use_peephole bool) (cs_prev_grad tf.Output, dicfo tf.Output, wci_grad tf.Output, wcf_grad tf.Output, wco_grad tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"use_peephole": use_peephole}
	opspec := tf.OpSpec{
		Type: "LSTMBlockCellGrad",
		Input: []tf.Input{
			x, cs_prev, h_prev, w, wci, wcf, wco, b, i, cs, f, o, ci, co, cs_grad, h_grad,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2), op.Output(3), op.Output(4)
}

// Computes gradients for the scaled exponential linear (Selu) operation.
//
// Arguments:
//	gradients: The backpropagated gradients to the corresponding Selu operation.
//	outputs: The outputs of the corresponding Selu operation.
//
// Returns The gradients: `gradients * (outputs + scale * alpha)`
// if outputs < 0, `scale * gradients` otherwise.
func SeluGrad(scope *Scope, gradients tf.Output, outputs tf.Output) (backprops tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "SeluGrad",
		Input: []tf.Input{
			gradients, outputs,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// LeakyReluGradAttr is an optional argument to LeakyReluGrad.
type LeakyReluGradAttr func(optionalAttr)

// LeakyReluGradAlpha sets the optional alpha attribute to value.
// If not specified, defaults to 0.2
func LeakyReluGradAlpha(value float32) LeakyReluGradAttr {
	return func(m optionalAttr) {
		m["alpha"] = value
	}
}

// Computes rectified linear gradients for a LeakyRelu operation.
//
// Arguments:
//	gradients: The backpropagated gradients to the corresponding LeakyRelu operation.
//	features: The features passed as input to the corresponding LeakyRelu operation,
// OR the outputs of that operation (both work equivalently).
//
// Returns `gradients * (features > 0) + alpha * gradients * (features <= 0)`.
func LeakyReluGrad(scope *Scope, gradients tf.Output, features tf.Output, optional ...LeakyReluGradAttr) (backprops tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "LeakyReluGrad",
		Input: []tf.Input{
			gradients, features,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
25042
25043// Computes the gradient of morphological 2-D dilation with respect to the filter.
25044//
25045// Arguments:
25046//	input: 4-D with shape `[batch, in_height, in_width, depth]`.
25047//	filter: 3-D with shape `[filter_height, filter_width, depth]`.
25048//	out_backprop: 4-D with shape `[batch, out_height, out_width, depth]`.
25049//	strides: 1-D of length 4. The stride of the sliding window for each dimension of
25050// the input tensor. Must be: `[1, stride_height, stride_width, 1]`.
25051//	rates: 1-D of length 4. The input stride for atrous morphological dilation.
25052// Must be: `[1, rate_height, rate_width, 1]`.
25053//	padding: The type of padding algorithm to use.
25054//
25055// Returns 3-D with shape `[filter_height, filter_width, depth]`.
25056func Dilation2DBackpropFilter(scope *Scope, input tf.Output, filter tf.Output, out_backprop tf.Output, strides []int64, rates []int64, padding string) (filter_backprop tf.Output) {
25057	if scope.Err() != nil {
25058		return
25059	}
25060	attrs := map[string]interface{}{"strides": strides, "rates": rates, "padding": padding}
25061	opspec := tf.OpSpec{
25062		Type: "Dilation2DBackpropFilter",
25063		Input: []tf.Input{
25064			input, filter, out_backprop,
25065		},
25066		Attrs: attrs,
25067	}
25068	op := scope.AddOperation(opspec)
25069	return op.Output(0)
25070}
25071
25072// Converts the given variant tensor to an iterator and stores it in the given resource.
25073//
25074// Arguments:
25075//	resource_handle: A handle to an iterator resource.
25076//	serialized: A variant tensor storing the state of the iterator contained in the
25077// resource.
25078//
25079// Returns the created operation.
25080func DeserializeIterator(scope *Scope, resource_handle tf.Output, serialized tf.Output) (o *tf.Operation) {
25081	if scope.Err() != nil {
25082		return
25083	}
25084	opspec := tf.OpSpec{
25085		Type: "DeserializeIterator",
25086		Input: []tf.Input{
25087			resource_handle, serialized,
25088		},
25089	}
25090	return scope.AddOperation(opspec)
25091}
25092
25093// Computes the gradient for the rsqrt of `x` wrt its input.
25094//
25095// Specifically, `grad = dy * -0.5 * y^3`, where `y = rsqrt(x)`, and `dy`
25096// is the corresponding input gradient.
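//
// For example, with `x = 4` (so `y = 0.5`) and `dy = 1`, the result is
// `1 * -0.5 * 0.5^3 = -0.0625`, matching the derivative of `x^(-1/2)` at `x = 4`.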
25097func RsqrtGrad(scope *Scope, y tf.Output, dy tf.Output) (z tf.Output) {
25098	if scope.Err() != nil {
25099		return
25100	}
25101	opspec := tf.OpSpec{
25102		Type: "RsqrtGrad",
25103		Input: []tf.Input{
25104			y, dy,
25105		},
25106	}
25107	op := scope.AddOperation(opspec)
25108	return op.Output(0)
25109}
25110
25111// MaxPoolWithArgmaxAttr is an optional argument to MaxPoolWithArgmax.
25112type MaxPoolWithArgmaxAttr func(optionalAttr)
25113
25114// MaxPoolWithArgmaxTargmax sets the optional Targmax attribute to value.
25115// If not specified, defaults to DT_INT64
25116func MaxPoolWithArgmaxTargmax(value tf.DataType) MaxPoolWithArgmaxAttr {
25117	return func(m optionalAttr) {
25118		m["Targmax"] = value
25119	}
25120}
25121
25122// MaxPoolWithArgmaxIncludeBatchInIndex sets the optional include_batch_in_index attribute to value.
25123//
25124// value: Whether to include batch dimension in flattened index of `argmax`.
25125// If not specified, defaults to false
25126func MaxPoolWithArgmaxIncludeBatchInIndex(value bool) MaxPoolWithArgmaxAttr {
25127	return func(m optionalAttr) {
25128		m["include_batch_in_index"] = value
25129	}
25130}
25131
25132// Performs max pooling on the input and outputs both max values and indices.
25133//
25134// The indices in `argmax` are flattened, so that a maximum value at position
25135// `[b, y, x, c]` becomes flattened index:
25136// `(y * width + x) * channels + c` if `include_batch_in_index` is False;
25137// `((b * height + y) * width + x) * channels + c` if `include_batch_in_index` is True.
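//
// For example, with `height = width = 4` and `channels = 3`, a maximum at
// position `[b = 1, y = 2, x = 0, c = 1]` flattens to `(2*4 + 0)*3 + 1 = 25`
// when `include_batch_in_index` is False, and to
// `((1*4 + 2)*4 + 0)*3 + 1 = 73` when it is True.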
25138//
25139// The indices returned are always in `[0, height) x [0, width)` before flattening,
25140// even if padding is involved and the mathematically correct answer is outside
25141// (either negative or too large).  This is a bug, but fixing it is difficult to do
25142// in a safe, backwards-compatible way, especially due to the flattening.
25143//
25144// Arguments:
25145//	input: 4-D with shape `[batch, height, width, channels]`.  Input to pool over.
25146//	ksize: The size of the window for each dimension of the input tensor.
25147//	strides: The stride of the sliding window for each dimension of the
25148// input tensor.
25149//	padding: The type of padding algorithm to use.
25150//
25151// Returns:
25152//	output: The max pooled output tensor.
25153//	argmax: 4-D.  The flattened indices of the max values chosen for each output.
25154func MaxPoolWithArgmax(scope *Scope, input tf.Output, ksize []int64, strides []int64, padding string, optional ...MaxPoolWithArgmaxAttr) (output tf.Output, argmax tf.Output) {
25155	if scope.Err() != nil {
25156		return
25157	}
25158	attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
25159	for _, a := range optional {
25160		a(attrs)
25161	}
25162	opspec := tf.OpSpec{
25163		Type: "MaxPoolWithArgmax",
25164		Input: []tf.Input{
25165			input,
25166		},
25167		Attrs: attrs,
25168	}
25169	op := scope.AddOperation(opspec)
25170	return op.Output(0), op.Output(1)
25171}
25172
25173// Returns the number of tensors in the input tensor map.
25174//
25175// input_handle: the input map
25176// size: the number of tensors in the map
25177func TensorMapSize(scope *Scope, input_handle tf.Output) (size tf.Output) {
25178	if scope.Err() != nil {
25179		return
25180	}
25181	opspec := tf.OpSpec{
25182		Type: "TensorMapSize",
25183		Input: []tf.Input{
25184			input_handle,
25185		},
25186	}
25187	op := scope.AddOperation(opspec)
25188	return op.Output(0)
25189}
25190
25191// MaxPoolGradGradAttr is an optional argument to MaxPoolGradGrad.
25192type MaxPoolGradGradAttr func(optionalAttr)
25193
25194// MaxPoolGradGradDataFormat sets the optional data_format attribute to value.
25195//
25196// value: Specify the data format of the input and output data. With the
25197// default format "NHWC", the data is stored in the order of:
25198//     [batch, in_height, in_width, in_channels].
25199// Alternatively, the format could be "NCHW", the data storage order of:
25200//     [batch, in_channels, in_height, in_width].
25201// If not specified, defaults to "NHWC"
25202func MaxPoolGradGradDataFormat(value string) MaxPoolGradGradAttr {
25203	return func(m optionalAttr) {
25204		m["data_format"] = value
25205	}
25206}
25207
25208// Computes second-order gradients of the maxpooling function.
25209//
25210// Arguments:
25211//	orig_input: The original input tensor.
25212//	orig_output: The original output tensor.
25213//	grad: 4-D.  Gradients of gradients w.r.t. the input of `max_pool`.
25214//	ksize: The size of the window for each dimension of the input tensor.
25215//	strides: The stride of the sliding window for each dimension of the
25216// input tensor.
25217//	padding: The type of padding algorithm to use.
25218//
25219// Returns Gradients of gradients w.r.t. the input to `max_pool`.
25220func MaxPoolGradGrad(scope *Scope, orig_input tf.Output, orig_output tf.Output, grad tf.Output, ksize []int64, strides []int64, padding string, optional ...MaxPoolGradGradAttr) (output tf.Output) {
25221	if scope.Err() != nil {
25222		return
25223	}
25224	attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
25225	for _, a := range optional {
25226		a(attrs)
25227	}
25228	opspec := tf.OpSpec{
25229		Type: "MaxPoolGradGrad",
25230		Input: []tf.Input{
25231			orig_input, orig_output, grad,
25232		},
25233		Attrs: attrs,
25234	}
25235	op := scope.AddOperation(opspec)
25236	return op.Output(0)
25237}
25238
25239// Takes the packed uint32 input and unpacks the input to uint8 to do
25240//
25241// Dequantization on device.
25242//
25243// Arguments:
25244//	input: Input tensors whose type is uint32, with shape [d0, ..., dn].
25245//	min_range: The minimum scalar value possibly produced for the input.
25246//	max_range: The maximum scalar value possibly produced for the input.
25247//	mode: String to determine the dequantize mode in {"MIN_COMBINED", "MIN_FIRST", "SCALED"}.
25248//	transpose_output: Boolean to determine if output is transposed. Transposing the
25249// output is faster when the input is large and its rank is higher than 1.
25250//
25251// Returns Output tensors whose type is bfloat16. If transpose_output is true,
25252// output shape is [dn * 4, dn-1, ..., d1, d0]. If transpose_output
25253// is false, output shape is [d0, ..., dn * 4].
25254func XlaDequantize(scope *Scope, input tf.Output, min_range float32, max_range float32, mode string, transpose_output bool) (output tf.Output) {
25255	if scope.Err() != nil {
25256		return
25257	}
25258	attrs := map[string]interface{}{"min_range": min_range, "max_range": max_range, "mode": mode, "transpose_output": transpose_output}
25259	opspec := tf.OpSpec{
25260		Type: "XlaDequantize",
25261		Input: []tf.Input{
25262			input,
25263		},
25264		Attrs: attrs,
25265	}
25266	op := scope.AddOperation(opspec)
25267	return op.Output(0)
25268}
25269
25270// MaxPoolGradV2Attr is an optional argument to MaxPoolGradV2.
25271type MaxPoolGradV2Attr func(optionalAttr)
25272
25273// MaxPoolGradV2DataFormat sets the optional data_format attribute to value.
25274//
25275// value: Specify the data format of the input and output data. With the
25276// default format "NHWC", the data is stored in the order of:
25277//     [batch, in_height, in_width, in_channels].
25278// Alternatively, the format could be "NCHW", the data storage order of:
25279//     [batch, in_channels, in_height, in_width].
25280// If not specified, defaults to "NHWC"
25281func MaxPoolGradV2DataFormat(value string) MaxPoolGradV2Attr {
25282	return func(m optionalAttr) {
25283		m["data_format"] = value
25284	}
25285}
25286
25287// Computes gradients of the maxpooling function.
25288//
25289// Arguments:
25290//	orig_input: The original input tensor.
25291//	orig_output: The original output tensor.
25292//	grad: 4-D.  Gradients w.r.t. the output of `max_pool`.
25293//	ksize: The size of the window for each dimension of the input tensor.
25294//	strides: The stride of the sliding window for each dimension of the
25295// input tensor.
25296//	padding: The type of padding algorithm to use.
25297//
25298// Returns Gradients w.r.t. the input to `max_pool`.
25299func MaxPoolGradV2(scope *Scope, orig_input tf.Output, orig_output tf.Output, grad tf.Output, ksize tf.Output, strides tf.Output, padding string, optional ...MaxPoolGradV2Attr) (output tf.Output) {
25300	if scope.Err() != nil {
25301		return
25302	}
25303	attrs := map[string]interface{}{"padding": padding}
25304	for _, a := range optional {
25305		a(attrs)
25306	}
25307	opspec := tf.OpSpec{
25308		Type: "MaxPoolGradV2",
25309		Input: []tf.Input{
25310			orig_input, orig_output, grad, ksize, strides,
25311		},
25312		Attrs: attrs,
25313	}
25314	op := scope.AddOperation(opspec)
25315	return op.Output(0)
25316}
25317
25318// Concatenates all tensors in the list along the 0th dimension.
25319//
25320// Requires that all tensors have the same shape except the first dimension.
25321//
25322// input_handle: The input list.
25323// element_shape: The shape of the uninitialized elements in the list. If the first
25324//   dimension is not -1, it is assumed that all list elements have the same
25325//   leading dim.
25326// leading_dims: The list of leading dims of uninitialized list elements. Used if
25327//   the leading dim of input_handle.element_shape or the element_shape input arg
25328//   is not already set.
25329// tensor: The concatenated result.
25330// lengths: Output tensor containing the sizes of the 0th dimension of the tensors in the list, used for computing the gradient.
25332func TensorListConcatV2(scope *Scope, input_handle tf.Output, element_shape tf.Output, leading_dims tf.Output, element_dtype tf.DataType) (tensor tf.Output, lengths tf.Output) {
25333	if scope.Err() != nil {
25334		return
25335	}
25336	attrs := map[string]interface{}{"element_dtype": element_dtype}
25337	opspec := tf.OpSpec{
25338		Type: "TensorListConcatV2",
25339		Input: []tf.Input{
25340			input_handle, element_shape, leading_dims,
25341		},
25342		Attrs: attrs,
25343	}
25344	op := scope.AddOperation(opspec)
25345	return op.Output(0), op.Output(1)
25346}
25347
25348// MaxPoolV2Attr is an optional argument to MaxPoolV2.
25349type MaxPoolV2Attr func(optionalAttr)
25350
25351// MaxPoolV2DataFormat sets the optional data_format attribute to value.
25352//
25353// value: Specify the data format of the input and output data. With the
25354// default format "NHWC", the data is stored in the order of:
25355//     [batch, in_height, in_width, in_channels].
25356// Alternatively, the format could be "NCHW", the data storage order of:
25357//     [batch, in_channels, in_height, in_width].
25358// If not specified, defaults to "NHWC"
25359func MaxPoolV2DataFormat(value string) MaxPoolV2Attr {
25360	return func(m optionalAttr) {
25361		m["data_format"] = value
25362	}
25363}
25364
25365// Performs max pooling on the input.
25366//
25367// Arguments:
25368//	input: 4-D input to pool over.
25369//	ksize: The size of the window for each dimension of the input tensor.
25370//	strides: The stride of the sliding window for each dimension of the
25371// input tensor.
25372//	padding: The type of padding algorithm to use.
25373//
25374// Returns The max pooled output tensor.
25375func MaxPoolV2(scope *Scope, input tf.Output, ksize tf.Output, strides tf.Output, padding string, optional ...MaxPoolV2Attr) (output tf.Output) {
25376	if scope.Err() != nil {
25377		return
25378	}
25379	attrs := map[string]interface{}{"padding": padding}
25380	for _, a := range optional {
25381		a(attrs)
25382	}
25383	opspec := tf.OpSpec{
25384		Type: "MaxPoolV2",
25385		Input: []tf.Input{
25386			input, ksize, strides,
25387		},
25388		Attrs: attrs,
25389	}
25390	op := scope.AddOperation(opspec)
25391	return op.Output(0)
25392}
25393
25394// SparseReduceSumAttr is an optional argument to SparseReduceSum.
25395type SparseReduceSumAttr func(optionalAttr)
25396
25397// SparseReduceSumKeepDims sets the optional keep_dims attribute to value.
25398//
25399// value: If true, retain reduced dimensions with length 1.
25400// If not specified, defaults to false
25401func SparseReduceSumKeepDims(value bool) SparseReduceSumAttr {
25402	return func(m optionalAttr) {
25403		m["keep_dims"] = value
25404	}
25405}
25406
25407// Computes the sum of elements across dimensions of a SparseTensor.
25408//
25409// This Op takes a SparseTensor and is the sparse counterpart to
25410// `tf.reduce_sum()`.  In particular, this Op also returns a dense `Tensor`
25411// instead of a sparse one.
25412//
25413// Reduces `sp_input` along the dimensions given in `reduction_axes`.  Unless
25414// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
25415// `reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained
25416// with length 1.
25417//
25418// If `reduction_axes` has no entries, all dimensions are reduced, and a tensor
25419// with a single element is returned.  Additionally, the axes can be negative,
25420// in which case they are interpreted according to Python's indexing rules.
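//
// For example, for a SparseTensor that represents `[[1, ?, 2], [?, 3, ?]]`
// (where `?` is an implicitly zero entry), reducing along axis 1 yields the
// dense vector `[3, 3]`.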
25421//
25422// Arguments:
25423//	input_indices: 2-D.  `N x R` matrix with the indices of non-empty values in a
25424// SparseTensor, possibly not in canonical ordering.
25425//	input_values: 1-D.  `N` non-empty values corresponding to `input_indices`.
25426//	input_shape: 1-D.  Shape of the input SparseTensor.
25427//	reduction_axes: 1-D.  Length-`K` vector containing the reduction axes.
25428//
25429// Returns `R-K`-D.  The reduced Tensor.
25430func SparseReduceSum(scope *Scope, input_indices tf.Output, input_values tf.Output, input_shape tf.Output, reduction_axes tf.Output, optional ...SparseReduceSumAttr) (output tf.Output) {
25431	if scope.Err() != nil {
25432		return
25433	}
25434	attrs := map[string]interface{}{}
25435	for _, a := range optional {
25436		a(attrs)
25437	}
25438	opspec := tf.OpSpec{
25439		Type: "SparseReduceSum",
25440		Input: []tf.Input{
25441			input_indices, input_values, input_shape, reduction_axes,
25442		},
25443		Attrs: attrs,
25444	}
25445	op := scope.AddOperation(opspec)
25446	return op.Output(0)
25447}
25448
25449// Compute the Hurwitz zeta function \\(\zeta(x, q)\\).
25450//
25451// The Hurwitz zeta function is defined as:
25452//
25454// \\(\zeta(x, q) = \sum_{n=0}^{\infty} (q + n)^{-x}\\)
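//
// The Riemann zeta function is the special case \\(\zeta(x) = \zeta(x, 1)\\).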
25455func Zeta(scope *Scope, x tf.Output, q tf.Output) (z tf.Output) {
25456	if scope.Err() != nil {
25457		return
25458	}
25459	opspec := tf.OpSpec{
25460		Type: "Zeta",
25461		Input: []tf.Input{
25462			x, q,
25463		},
25464	}
25465	op := scope.AddOperation(opspec)
25466	return op.Output(0)
25467}
25468
25469// QuantizedMatMulWithBiasAndReluAndRequantizeAttr is an optional argument to QuantizedMatMulWithBiasAndReluAndRequantize.
25470type QuantizedMatMulWithBiasAndReluAndRequantizeAttr func(optionalAttr)
25471
25472// QuantizedMatMulWithBiasAndReluAndRequantizeToutput sets the optional Toutput attribute to value.
25473// If not specified, defaults to DT_QUINT8
25474func QuantizedMatMulWithBiasAndReluAndRequantizeToutput(value tf.DataType) QuantizedMatMulWithBiasAndReluAndRequantizeAttr {
25475	return func(m optionalAttr) {
25476		m["Toutput"] = value
25477	}
25478}
25479
25480// QuantizedMatMulWithBiasAndReluAndRequantizeTransposeA sets the optional transpose_a attribute to value.
25481//
25482// value: If true, `a` is transposed before multiplication.
25483// If not specified, defaults to false
25484func QuantizedMatMulWithBiasAndReluAndRequantizeTransposeA(value bool) QuantizedMatMulWithBiasAndReluAndRequantizeAttr {
25485	return func(m optionalAttr) {
25486		m["transpose_a"] = value
25487	}
25488}
25489
25490// QuantizedMatMulWithBiasAndReluAndRequantizeTransposeB sets the optional transpose_b attribute to value.
25491//
25492// value: If true, `b` is transposed before multiplication.
25493// If not specified, defaults to false
25494func QuantizedMatMulWithBiasAndReluAndRequantizeTransposeB(value bool) QuantizedMatMulWithBiasAndReluAndRequantizeAttr {
25495	return func(m optionalAttr) {
25496		m["transpose_b"] = value
25497	}
25498}
25499
25500// QuantizedMatMulWithBiasAndReluAndRequantizeInputQuantMode sets the optional input_quant_mode attribute to value.
25501//
25502// value: Input data quantization mode. Either MIN_FIRST (default) or SCALED.
25503// If not specified, defaults to "MIN_FIRST"
25504func QuantizedMatMulWithBiasAndReluAndRequantizeInputQuantMode(value string) QuantizedMatMulWithBiasAndReluAndRequantizeAttr {
25505	return func(m optionalAttr) {
25506		m["input_quant_mode"] = value
25507	}
25508}
25509
25510// Performs a quantized matrix multiplication of `a` by the matrix `b` with fused
25511// bias add, relu, and requantize.
25512//
25513// The inputs must be two-dimensional matrices and a 1-D bias vector. The inner
25514// dimension of `a` (after being transposed if `transpose_a` is non-zero) must
25515// match the outer dimension of `b` (after being transposed if `transpose_b` is
25516// non-zero). The bias values are then broadcast-added to the matrix
25517// multiplication result; the bias size must match the inner dimension of `b`.
25518// A relu activation then yields a non-negative result, which is finally
25519// requantized to produce the uint8 result.
25520//
25521// Arguments:
25522//	a: A matrix to be multiplied. Must be a two-dimensional tensor of type `quint8`.
25523//	b: A matrix to be multiplied and must be a two-dimensional tensor of type `qint8`.
25524//	bias: A 1-D bias tensor with size matching the inner dimension of `b` (after being
25525// transposed if `transpose_b` is non-zero).
25526//	min_a: The float value that the lowest quantized `a` value represents.
25527//	max_a: The float value that the highest quantized `a` value represents.
25528//	min_b: The float value that the lowest quantized `b` value represents.
25529//	max_b: The float value that the highest quantized `b` value represents.
25530//	min_freezed_output: The float value that the lowest quantized output value after requantize represents.
25531//	max_freezed_output: The float value that the highest quantized output value after requantize represents.
25532//
25533// Returns:
25534//	out: The requantized output tensor.
25535//	min_out: The float value that the lowest quantized output value represents.
25536//	max_out: The float value that the highest quantized output value represents.
25537func QuantizedMatMulWithBiasAndReluAndRequantize(scope *Scope, a tf.Output, b tf.Output, bias tf.Output, min_a tf.Output, max_a tf.Output, min_b tf.Output, max_b tf.Output, min_freezed_output tf.Output, max_freezed_output tf.Output, optional ...QuantizedMatMulWithBiasAndReluAndRequantizeAttr) (out tf.Output, min_out tf.Output, max_out tf.Output) {
25538	if scope.Err() != nil {
25539		return
25540	}
25541	attrs := map[string]interface{}{}
25542	for _, a := range optional {
25543		a(attrs)
25544	}
25545	opspec := tf.OpSpec{
25546		Type: "QuantizedMatMulWithBiasAndReluAndRequantize",
25547		Input: []tf.Input{
25548			a, b, bias, min_a, max_a, min_b, max_b, min_freezed_output, max_freezed_output,
25549		},
25550		Attrs: attrs,
25551	}
25552	op := scope.AddOperation(opspec)
25553	return op.Output(0), op.Output(1), op.Output(2)
25554}
25555
25556// Conv3DBackpropInputV2Attr is an optional argument to Conv3DBackpropInputV2.
25557type Conv3DBackpropInputV2Attr func(optionalAttr)
25558
25559// Conv3DBackpropInputV2DataFormat sets the optional data_format attribute to value.
25560//
25561// value: The data format of the input and output data. With the
25562// default format "NDHWC", the data is stored in the order of:
25563//     [batch, in_depth, in_height, in_width, in_channels].
25564// Alternatively, the format could be "NCDHW", the data storage order is:
25565//     [batch, in_channels, in_depth, in_height, in_width].
25566// If not specified, defaults to "NDHWC"
25567func Conv3DBackpropInputV2DataFormat(value string) Conv3DBackpropInputV2Attr {
25568	return func(m optionalAttr) {
25569		m["data_format"] = value
25570	}
25571}
25572
25573// Conv3DBackpropInputV2Dilations sets the optional dilations attribute to value.
25574//
25575// value: 1-D tensor of length 5.  The dilation factor for each dimension of
25576// `input`. If set to k > 1, there will be k-1 skipped cells between each
25577// filter element on that dimension. The dimension order is determined by the
25578// value of `data_format`, see above for details. Dilations in the batch and
25579// depth dimensions must be 1.
25580// If not specified, defaults to [1, 1, 1, 1, 1]
25581func Conv3DBackpropInputV2Dilations(value []int64) Conv3DBackpropInputV2Attr {
25582	return func(m optionalAttr) {
25583		m["dilations"] = value
25584	}
25585}
25586
25587// Computes the gradients of 3-D convolution with respect to the input.
25588//
25589// Arguments:
25590//	input_sizes: An integer vector representing the tensor shape of `input`,
25591// where `input` is a 5-D
25592// `[batch, depth, rows, cols, in_channels]` tensor.
25593//	filter: Shape `[depth, rows, cols, in_channels, out_channels]`.
25594// `in_channels` must match between `input` and `filter`.
25595//	out_backprop: Backprop signal of shape `[batch, out_depth, out_rows, out_cols,
25596// out_channels]`.
25597//	strides: 1-D tensor of length 5. The stride of the sliding window for each
25598// dimension of `input`. Must have `strides[0] = strides[4] = 1`.
25599//	padding: The type of padding algorithm to use.
25600func Conv3DBackpropInputV2(scope *Scope, input_sizes tf.Output, filter tf.Output, out_backprop tf.Output, strides []int64, padding string, optional ...Conv3DBackpropInputV2Attr) (output tf.Output) {
25601	if scope.Err() != nil {
25602		return
25603	}
25604	attrs := map[string]interface{}{"strides": strides, "padding": padding}
25605	for _, a := range optional {
25606		a(attrs)
25607	}
25608	opspec := tf.OpSpec{
25609		Type: "Conv3DBackpropInputV2",
25610		Input: []tf.Input{
25611			input_sizes, filter, out_backprop,
25612		},
25613		Attrs: attrs,
25614	}
25615	op := scope.AddOperation(opspec)
25616	return op.Output(0)
25617}
25618
25619// LRNAttr is an optional argument to LRN.
25620type LRNAttr func(optionalAttr)
25621
25622// LRNDepthRadius sets the optional depth_radius attribute to value.
25623//
25624// value: 0-D.  Half-width of the 1-D normalization window.
25625// If not specified, defaults to 5
25626func LRNDepthRadius(value int64) LRNAttr {
25627	return func(m optionalAttr) {
25628		m["depth_radius"] = value
25629	}
25630}
25631
25632// LRNBias sets the optional bias attribute to value.
25633//
25634// value: An offset (usually positive to avoid dividing by 0).
25635// If not specified, defaults to 1
25636func LRNBias(value float32) LRNAttr {
25637	return func(m optionalAttr) {
25638		m["bias"] = value
25639	}
25640}
25641
25642// LRNAlpha sets the optional alpha attribute to value.
25643//
25644// value: A scale factor, usually positive.
25645// If not specified, defaults to 1
25646func LRNAlpha(value float32) LRNAttr {
25647	return func(m optionalAttr) {
25648		m["alpha"] = value
25649	}
25650}
25651
25652// LRNBeta sets the optional beta attribute to value.
25653//
25654// value: An exponent.
25655// If not specified, defaults to 0.5
25656func LRNBeta(value float32) LRNAttr {
25657	return func(m optionalAttr) {
25658		m["beta"] = value
25659	}
25660}
25661
25662// Local Response Normalization.
25663//
25664// The 4-D `input` tensor is treated as a 3-D array of 1-D vectors (along the last
25665// dimension), and each vector is normalized independently.  Within a given vector,
25666// each component is divided by the weighted, squared sum of inputs within
25667// `depth_radius`.  In detail,
25668//
25669//     sqr_sum[a, b, c, d] =
25670//         sum(input[a, b, c, d - depth_radius : d + depth_radius + 1] ** 2)
25671//     output = input / (bias + alpha * sqr_sum) ** beta
25672//
25673// For details, see [Krizhevsky et al., ImageNet classification with deep
25674// convolutional neural networks (NIPS 2012)](http://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks).
25675//
25676// Arguments:
25677//	input: 4-D.
25678func LRN(scope *Scope, input tf.Output, optional ...LRNAttr) (output tf.Output) {
25679	if scope.Err() != nil {
25680		return
25681	}
25682	attrs := map[string]interface{}{}
25683	for _, a := range optional {
25684		a(attrs)
25685	}
25686	opspec := tf.OpSpec{
25687		Type: "LRN",
25688		Input: []tf.Input{
25689			input,
25690		},
25691		Attrs: attrs,
25692	}
25693	op := scope.AddOperation(opspec)
25694	return op.Output(0)
25695}
25696
25697// Returns which elements of x are Inf.
25698//
25699// @compatibility(numpy)
25700// Equivalent to np.isinf
25701// @end_compatibility
25702//
25703// Example:
25704//
25705// ```python
25706// x = tf.constant([5.0, np.inf, 6.8, np.inf])
25707// tf.math.is_inf(x) ==> [False, True, False, True]
25708// ```
25709func IsInf(scope *Scope, x tf.Output) (y tf.Output) {
25710	if scope.Err() != nil {
25711		return
25712	}
25713	opspec := tf.OpSpec{
25714		Type: "IsInf",
25715		Input: []tf.Input{
25716			x,
25717		},
25718	}
25719	op := scope.AddOperation(opspec)
25720	return op.Output(0)
25721}
25722
25723// MaxPool3DAttr is an optional argument to MaxPool3D.
25724type MaxPool3DAttr func(optionalAttr)
25725
25726// MaxPool3DDataFormat sets the optional data_format attribute to value.
25727//
25728// value: The data format of the input and output data. With the
25729// default format "NDHWC", the data is stored in the order of:
25730//     [batch, in_depth, in_height, in_width, in_channels].
25731// Alternatively, the format could be "NCDHW", the data storage order is:
25732//     [batch, in_channels, in_depth, in_height, in_width].
25733// If not specified, defaults to "NDHWC"
25734func MaxPool3DDataFormat(value string) MaxPool3DAttr {
25735	return func(m optionalAttr) {
25736		m["data_format"] = value
25737	}
25738}
25739
25740// Performs 3D max pooling on the input.
25741//
25742// Arguments:
25743//	input: Shape `[batch, depth, rows, cols, channels]` tensor to pool over.
25744//	ksize: 1-D tensor of length 5. The size of the window for each dimension of
25745// the input tensor. Must have `ksize[0] = ksize[4] = 1`.
25746//	strides: 1-D tensor of length 5. The stride of the sliding window for each
25747// dimension of `input`. Must have `strides[0] = strides[4] = 1`.
25748//	padding: The type of padding algorithm to use.
25749//
25750// Returns The max pooled output tensor.
25751func MaxPool3D(scope *Scope, input tf.Output, ksize []int64, strides []int64, padding string, optional ...MaxPool3DAttr) (output tf.Output) {
25752	if scope.Err() != nil {
25753		return
25754	}
25755	attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
25756	for _, a := range optional {
25757		a(attrs)
25758	}
25759	opspec := tf.OpSpec{
25760		Type: "MaxPool3D",
25761		Input: []tf.Input{
25762			input,
25763		},
25764		Attrs: attrs,
25765	}
25766	op := scope.AddOperation(opspec)
25767	return op.Output(0)
25768}
25769
25770// Deprecated. Use TensorArrayCloseV3
25771//
25772// DEPRECATED at GraphDef version 26: Use TensorArrayCloseV3
25773//
25774// Returns the created operation.
25775func TensorArrayCloseV2(scope *Scope, handle tf.Output) (o *tf.Operation) {
25776	if scope.Err() != nil {
25777		return
25778	}
25779	opspec := tf.OpSpec{
25780		Type: "TensorArrayCloseV2",
25781		Input: []tf.Input{
25782			handle,
25783		},
25784	}
25785	return scope.AddOperation(opspec)
25786}
25787
25788// Computes the Kth order statistic of a data set. The current
25789//
25790// implementation uses a binary search requiring exactly 32 passes over
25791// the input data. The running time is linear with respect to input
25792// size. The median-of-medians algorithm is probably faster, but is
25793// difficult to implement efficiently in XLA. The implementation imposes
25794// a total ordering on floats. The ordering is consistent with the usual
25795// partial order.  Positive NaNs are greater than positive
25796// infinity. Negative NaNs are less than negative infinity. NaNs with
25797// distinct payloads are treated as distinct. Subnormal numbers are
25798// preserved (not flushed to zero). Positive infinity is greater than all
25799// numbers. Negative infinity is less than all numbers. Positive is
25800// greater than negative zero. There are fewer than k values greater than
25801// the kth order statistic. There are at least k values greater than or
25802// equal to the kth order statistic. The semantics are not the same as
25803// top_k_unique.
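//
// For example, over the values [3, 1, 4, 1, 5] with k = 2, the value 4
// satisfies both properties: only one value (5) is strictly greater than it,
// and two values (4 and 5) are greater than or equal to it.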
25804func KthOrderStatistic(scope *Scope, input tf.Output, k int64) (output tf.Output) {
25805	if scope.Err() != nil {
25806		return
25807	}
25808	attrs := map[string]interface{}{"k": k}
25809	opspec := tf.OpSpec{
25810		Type: "KthOrderStatistic",
25811		Input: []tf.Input{
25812			input,
25813		},
25814		Attrs: attrs,
25815	}
25816	op := scope.AddOperation(opspec)
25817	return op.Output(0)
25818}
25819
25820// AvgPool3DGradAttr is an optional argument to AvgPool3DGrad.
25821type AvgPool3DGradAttr func(optionalAttr)
25822
25823// AvgPool3DGradDataFormat sets the optional data_format attribute to value.
25824//
25825// value: The data format of the input and output data. With the
25826// default format "NDHWC", the data is stored in the order of:
25827//     [batch, in_depth, in_height, in_width, in_channels].
25828// Alternatively, the format could be "NCDHW", the data storage order is:
25829//     [batch, in_channels, in_depth, in_height, in_width].
25830// If not specified, defaults to "NDHWC"
25831func AvgPool3DGradDataFormat(value string) AvgPool3DGradAttr {
25832	return func(m optionalAttr) {
25833		m["data_format"] = value
25834	}
25835}
25836
25837// Computes gradients of average pooling function.
25838//
25839// Arguments:
25840//	orig_input_shape: The original input dimensions.
25841//	grad: Output backprop of shape `[batch, depth, rows, cols, channels]`.
25842//	ksize: 1-D tensor of length 5. The size of the window for each dimension of
25843// the input tensor. Must have `ksize[0] = ksize[4] = 1`.
25844//	strides: 1-D tensor of length 5. The stride of the sliding window for each
25845// dimension of `input`. Must have `strides[0] = strides[4] = 1`.
25846//	padding: The type of padding algorithm to use.
25847//
25848// Returns The backprop for input.
25849func AvgPool3DGrad(scope *Scope, orig_input_shape tf.Output, grad tf.Output, ksize []int64, strides []int64, padding string, optional ...AvgPool3DGradAttr) (output tf.Output) {
25850	if scope.Err() != nil {
25851		return
25852	}
25853	attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
25854	for _, a := range optional {
25855		a(attrs)
25856	}
25857	opspec := tf.OpSpec{
25858		Type: "AvgPool3DGrad",
25859		Input: []tf.Input{
25860			orig_input_shape, grad,
25861		},
25862		Attrs: attrs,
25863	}
25864	op := scope.AddOperation(opspec)
25865	return op.Output(0)
25866}
25867
25868// Conv3DBackpropFilterAttr is an optional argument to Conv3DBackpropFilter.
25869type Conv3DBackpropFilterAttr func(optionalAttr)
25870
25871// Conv3DBackpropFilterDilations sets the optional dilations attribute to value.
25872// If not specified, defaults to [1, 1, 1, 1, 1]
25873func Conv3DBackpropFilterDilations(value []int64) Conv3DBackpropFilterAttr {
25874	return func(m optionalAttr) {
25875		m["dilations"] = value
25876	}
25877}
25878
25879// Computes the gradients of 3-D convolution with respect to the filter.
25880//
25881// DEPRECATED at GraphDef version 10: Use Conv3DBackpropFilterV2
25882//
25883// Arguments:
25884//	input: Shape `[batch, depth, rows, cols, in_channels]`.
25885//	filter: Shape `[depth, rows, cols, in_channels, out_channels]`.
25886// `in_channels` must match between `input` and `filter`.
25887//	out_backprop: Backprop signal of shape `[batch, out_depth, out_rows, out_cols,
25888// out_channels]`.
25889//	strides: 1-D tensor of length 5. The stride of the sliding window for each
25890// dimension of `input`. Must have `strides[0] = strides[4] = 1`.
25891//	padding: The type of padding algorithm to use.
25892func Conv3DBackpropFilter(scope *Scope, input tf.Output, filter tf.Output, out_backprop tf.Output, strides []int64, padding string, optional ...Conv3DBackpropFilterAttr) (output tf.Output) {
25893	if scope.Err() != nil {
25894		return
25895	}
25896	attrs := map[string]interface{}{"strides": strides, "padding": padding}
25897	for _, a := range optional {
25898		a(attrs)
25899	}
25900	opspec := tf.OpSpec{
25901		Type: "Conv3DBackpropFilter",
25902		Input: []tf.Input{
25903			input, filter, out_backprop,
25904		},
25905		Attrs: attrs,
25906	}
25907	op := scope.AddOperation(opspec)
25908	return op.Output(0)
25909}
25910
25911// Conv3DAttr is an optional argument to Conv3D.
25912type Conv3DAttr func(optionalAttr)
25913
25914// Conv3DDataFormat sets the optional data_format attribute to value.
25915//
25916// value: The data format of the input and output data. With the
25917// default format "NDHWC", the data is stored in the order of:
25918//     [batch, in_depth, in_height, in_width, in_channels].
25919// Alternatively, the format could be "NCDHW", the data storage order is:
25920//     [batch, in_channels, in_depth, in_height, in_width].
25921// If not specified, defaults to "NDHWC"
25922func Conv3DDataFormat(value string) Conv3DAttr {
25923	return func(m optionalAttr) {
25924		m["data_format"] = value
25925	}
25926}
25927
25928// Conv3DDilations sets the optional dilations attribute to value.
25929//
25930// value: 1-D tensor of length 5.  The dilation factor for each dimension of
25931// `input`. If set to k > 1, there will be k-1 skipped cells between each
25932// filter element on that dimension. The dimension order is determined by the
25933// value of `data_format`, see above for details. Dilations in the batch and
25934// depth dimensions must be 1.
25935// If not specified, defaults to [1, 1, 1, 1, 1]
25936func Conv3DDilations(value []int64) Conv3DAttr {
25937	return func(m optionalAttr) {
25938		m["dilations"] = value
25939	}
25940}
25941
25942// Computes a 3-D convolution given 5-D `input` and `filter` tensors.
25943//
25944// In signal processing, cross-correlation is a measure of similarity of
25945// two waveforms as a function of a time-lag applied to one of them. This
25946// is also known as a sliding dot product or sliding inner-product.
25947//
25948// Our Conv3D implements a form of cross-correlation.
25949//
25950// Arguments:
25951//	input: Shape `[batch, in_depth, in_height, in_width, in_channels]`.
25952//	filter: Shape `[filter_depth, filter_height, filter_width, in_channels,
25953// out_channels]`. `in_channels` must match between `input` and `filter`.
25954//	strides: 1-D tensor of length 5. The stride of the sliding window for each
25955// dimension of `input`. Must have `strides[0] = strides[4] = 1`.
25956//	padding: The type of padding algorithm to use.
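//
// A minimal graph-construction sketch (assuming the `NewScope` and
// `Placeholder` helpers from this package; the shapes in the comments are
// illustrative only):
//
// ```go
// s := op.NewScope()
// in := op.Placeholder(s, tf.Float)  // [batch, in_depth, in_height, in_width, in_channels]
// f := op.Placeholder(s, tf.Float)   // [filter_depth, filter_height, filter_width, in_channels, out_channels]
// out := op.Conv3D(s, in, f, []int64{1, 1, 1, 1, 1}, "SAME")
// ```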
25957func Conv3D(scope *Scope, input tf.Output, filter tf.Output, strides []int64, padding string, optional ...Conv3DAttr) (output tf.Output) {
25958	if scope.Err() != nil {
25959		return
25960	}
25961	attrs := map[string]interface{}{"strides": strides, "padding": padding}
25962	for _, a := range optional {
25963		a(attrs)
25964	}
25965	opspec := tf.OpSpec{
25966		Type: "Conv3D",
25967		Input: []tf.Input{
25968			input, filter,
25969		},
25970		Attrs: attrs,
25971	}
25972	op := scope.AddOperation(opspec)
25973	return op.Output(0)
25974}
25975
25976// Returns a list which has the passed-in `Tensor` as last element and the other elements of the given list in `input_handle`.
25977//
25978// tensor: The tensor to put on the list.
25979// input_handle: The old list.
25980// output_handle: A list with the elements of the old list followed by tensor.
25981// element_dtype: the type of elements in the list.
25982// element_shape: a shape compatible with that of elements in the list.
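//
// A minimal sketch (assuming the `NewScope` and `Const` helpers from this
// package, and the generated `EmptyTensorList` wrapper taking an element
// shape, a maximum element count, and an element dtype):
//
// ```go
// s := op.NewScope()
// list := op.EmptyTensorList(s, op.Const(s, []int32{2}), op.Const(s, int32(8)), tf.Float)
// list = op.TensorListPushBack(s, list, op.Const(s, []float32{1, 2}))
// ```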
25983func TensorListPushBack(scope *Scope, input_handle tf.Output, tensor tf.Output) (output_handle tf.Output) {
25984	if scope.Err() != nil {
25985		return
25986	}
25987	opspec := tf.OpSpec{
25988		Type: "TensorListPushBack",
25989		Input: []tf.Input{
25990			input_handle, tensor,
25991		},
25992	}
25993	op := scope.AddOperation(opspec)
25994	return op.Output(0)
25995}
25996
25997// Returns which elements of x are NaN.
25998//
25999// @compatibility(numpy)
26000// Equivalent to np.isnan
26001// @end_compatibility
26002//
26003// Example:
26004//
26005// ```python
26006// x = tf.constant([5.0, np.nan, 6.8, np.nan, np.inf])
26007// tf.math.is_nan(x) ==> [False, True, False, True, False]
26008// ```
26009func IsNan(scope *Scope, x tf.Output) (y tf.Output) {
26010	if scope.Err() != nil {
26011		return
26012	}
26013	opspec := tf.OpSpec{
26014		Type: "IsNan",
26015		Input: []tf.Input{
26016			x,
26017		},
26018	}
26019	op := scope.AddOperation(opspec)
26020	return op.Output(0)
26021}
26022
26023// Adds a value to the current value of a variable.
26024//
26025// Any ReadVariableOp with a control dependency on this op is guaranteed to
26026// see the incremented value or a subsequent newer one.
26027//
26028// Arguments:
26029//	resource: handle to the resource in which to store the variable.
26030//	value: the value by which the variable will be incremented.
26031//
26032// Returns the created operation.
26033func AssignAddVariableOp(scope *Scope, resource tf.Output, value tf.Output) (o *tf.Operation) {
26034	if scope.Err() != nil {
26035		return
26036	}
26037	opspec := tf.OpSpec{
26038		Type: "AssignAddVariableOp",
26039		Input: []tf.Input{
26040			resource, value,
26041		},
26042	}
26043	return scope.AddOperation(opspec)
26044}
26045
26046// DepthwiseConv2dNativeBackpropInputAttr is an optional argument to DepthwiseConv2dNativeBackpropInput.
26047type DepthwiseConv2dNativeBackpropInputAttr func(optionalAttr)
26048
26049// DepthwiseConv2dNativeBackpropInputExplicitPaddings sets the optional explicit_paddings attribute to value.
26050// If not specified, defaults to []
26051func DepthwiseConv2dNativeBackpropInputExplicitPaddings(value []int64) DepthwiseConv2dNativeBackpropInputAttr {
26052	return func(m optionalAttr) {
26053		m["explicit_paddings"] = value
26054	}
26055}
26056
26057// DepthwiseConv2dNativeBackpropInputDataFormat sets the optional data_format attribute to value.
26058//
26059// value: Specify the data format of the input and output data. With the
26060// default format "NHWC", the data is stored in the order of:
26061//     [batch, height, width, channels].
26062// Alternatively, the format could be "NCHW", the data storage order of:
26063//     [batch, channels, height, width].
26064// If not specified, defaults to "NHWC"
26065func DepthwiseConv2dNativeBackpropInputDataFormat(value string) DepthwiseConv2dNativeBackpropInputAttr {
26066	return func(m optionalAttr) {
26067		m["data_format"] = value
26068	}
26069}
26070
26071// DepthwiseConv2dNativeBackpropInputDilations sets the optional dilations attribute to value.
26072//
26073// value: 1-D tensor of length 4.  The dilation factor for each dimension of
26074// `input`. If set to k > 1, there will be k-1 skipped cells between each filter
26075// element on that dimension. The dimension order is determined by the value of
26076// `data_format`, see above for details. Dilations in the batch and depth
26077// dimensions must be 1.
26078// If not specified, defaults to [1, 1, 1, 1]
26079func DepthwiseConv2dNativeBackpropInputDilations(value []int64) DepthwiseConv2dNativeBackpropInputAttr {
26080	return func(m optionalAttr) {
26081		m["dilations"] = value
26082	}
26083}
26084
26085// Computes the gradients of depthwise convolution with respect to the input.
26086//
26087// Arguments:
26088//	input_sizes: An integer vector representing the shape of `input`, based
26089// on `data_format`.  For example, if `data_format` is 'NHWC' then
26090//  `input` is a 4-D `[batch, height, width, channels]` tensor.
26091//	filter: 4-D with shape
26092// `[filter_height, filter_width, in_channels, depthwise_multiplier]`.
26093//	out_backprop: 4-D with shape based on `data_format`.
26094// For example, if `data_format` is 'NHWC' then
26095// out_backprop shape is `[batch, out_height, out_width, out_channels]`.
26096// Gradients w.r.t. the output of the convolution.
26097//	strides: The stride of the sliding window for each dimension of the input
26098// of the convolution.
26099//	padding: The type of padding algorithm to use.
26100//
26101// Returns 4-D with shape according to `data_format`.  For example, if
26102// `data_format` is 'NHWC', output shape is `[batch, in_height,
26103// in_width, in_channels]`.  Gradient w.r.t. the input of the
26104// convolution.
26105func DepthwiseConv2dNativeBackpropInput(scope *Scope, input_sizes tf.Output, filter tf.Output, out_backprop tf.Output, strides []int64, padding string, optional ...DepthwiseConv2dNativeBackpropInputAttr) (output tf.Output) {
26106	if scope.Err() != nil {
26107		return
26108	}
26109	attrs := map[string]interface{}{"strides": strides, "padding": padding}
26110	for _, a := range optional {
26111		a(attrs)
26112	}
26113	opspec := tf.OpSpec{
26114		Type: "DepthwiseConv2dNativeBackpropInput",
26115		Input: []tf.Input{
26116			input_sizes, filter, out_backprop,
26117		},
26118		Attrs: attrs,
26119	}
26120	op := scope.AddOperation(opspec)
26121	return op.Output(0)
26122}
26123
26124// Updates the table to associate keys with values.
26125//
26126// The tensor `keys` must be of the same type as the keys of the table.
26127// The tensor `values` must be of the type of the table values.
26128//
26129// Arguments:
26130//	table_handle: Handle to the table.
26131//	keys: Any shape.  Keys to look up.
26132//	values: Values to associate with keys.
26133//
26134// Returns the created operation.
26135func LookupTableInsertV2(scope *Scope, table_handle tf.Output, keys tf.Output, values tf.Output) (o *tf.Operation) {
26136	if scope.Err() != nil {
26137		return
26138	}
26139	opspec := tf.OpSpec{
26140		Type: "LookupTableInsertV2",
26141		Input: []tf.Input{
26142			table_handle, keys, values,
26143		},
26144	}
26145	return scope.AddOperation(opspec)
26146}
26147
26148// Component-wise multiplies a SparseTensor by a dense Tensor.
26149//
26150// The output locations corresponding to the implicitly zero elements in the sparse
26151// tensor will be zero (i.e., will not take up storage space), regardless of the
26152// contents of the dense tensor (even if it is +/-INF, despite INF*0 == NaN).
26153//
26154// *Limitation*: this Op only broadcasts the dense side to the sparse side, but not
26155// the other direction.
26156//
26157// Arguments:
26158//	sp_indices: 2-D.  `N x R` matrix with the indices of non-empty values in a
26159// SparseTensor, possibly not in canonical ordering.
26160//	sp_values: 1-D.  `N` non-empty values corresponding to `sp_indices`.
26161//	sp_shape: 1-D.  Shape of the input SparseTensor.
26162//	dense: `R`-D.  The dense Tensor operand.
26163//
26164// Returns 1-D.  The `N` values that are operated on.
26165func SparseDenseCwiseMul(scope *Scope, sp_indices tf.Output, sp_values tf.Output, sp_shape tf.Output, dense tf.Output) (output tf.Output) {
26166	if scope.Err() != nil {
26167		return
26168	}
26169	opspec := tf.OpSpec{
26170		Type: "SparseDenseCwiseMul",
26171		Input: []tf.Input{
26172			sp_indices, sp_values, sp_shape, dense,
26173		},
26174	}
26175	op := scope.AddOperation(opspec)
26176	return op.Output(0)
26177}
26178
26179// Performs padding as a preprocessing step during a convolution.
26180//
26181// Similar to FusedResizeAndPadConv2D, this op allows for an optimized
26182// implementation where the spatial padding transformation stage is fused with the
26183// im2col lookup, but in this case without the bilinear filtering required for
26184// resizing. Fusing the padding prevents the need to write out the intermediate
26185// results as whole tensors, reducing memory pressure, and we can get some latency
26186// gains by merging the transformation calculations.
26187// The data_format attribute for Conv2D isn't supported by this op, and 'NHWC'
26188// order is used instead.
26189// Internally this op uses a single per-graph scratch buffer, which means that it
26190// will block if multiple versions are being run in parallel. This is because this
26191// operator is primarily an optimization to minimize memory usage.
26192//
26193// Arguments:
26194//	input: 4-D with shape `[batch, in_height, in_width, in_channels]`.
26195//	paddings: A two-column matrix specifying the padding sizes. The number of
26196// rows must be the same as the rank of `input`.
26197//	filter: 4-D with shape
26198// `[filter_height, filter_width, in_channels, out_channels]`.
26199//	mode: The padding mode to apply before the convolution (a mirror padding mode, e.g. "REFLECT" or "SYMMETRIC").
26200//	strides: 1-D of length 4.  The stride of the sliding window for each dimension
26201// of `input`. Must be in the same order as the dimension specified with format.
26202//	padding: The type of padding algorithm to use.
26203func FusedPadConv2D(scope *Scope, input tf.Output, paddings tf.Output, filter tf.Output, mode string, strides []int64, padding string) (output tf.Output) {
26204	if scope.Err() != nil {
26205		return
26206	}
26207	attrs := map[string]interface{}{"mode": mode, "strides": strides, "padding": padding}
26208	opspec := tf.OpSpec{
26209		Type: "FusedPadConv2D",
26210		Input: []tf.Input{
26211			input, paddings, filter,
26212		},
26213		Attrs: attrs,
26214	}
26215	op := scope.AddOperation(opspec)
26216	return op.Output(0)
26217}
26218
26219// List of the given size with empty elements.
26220//
26221// element_shape: the shape of the future elements of the list
26222// num_elements: the number of elements to reserve
26223// handle: the output list
26224// element_dtype: the desired type of elements in the list.
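//
// A minimal sketch (assuming the `NewScope` and `Const` helpers from this
// package): reserve a list of four float vectors of shape [2]:
//
// ```go
// s := op.NewScope()
// list := op.TensorListReserve(s, op.Const(s, []int32{2}), op.Const(s, int32(4)), tf.Float)
// ```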
26225func TensorListReserve(scope *Scope, element_shape tf.Output, num_elements tf.Output, element_dtype tf.DataType) (handle tf.Output) {
26226	if scope.Err() != nil {
26227		return
26228	}
26229	attrs := map[string]interface{}{"element_dtype": element_dtype}
26230	opspec := tf.OpSpec{
26231		Type: "TensorListReserve",
26232		Input: []tf.Input{
26233			element_shape, num_elements,
26234		},
26235		Attrs: attrs,
26236	}
26237	op := scope.AddOperation(opspec)
26238	return op.Output(0)
26239}
26240
26241// Clips tensor values to a specified min and max.
26242//
26243// Given a tensor `t`, this operation returns a tensor of the same type and
26244// shape as `t` with its values clipped to `clip_value_min` and `clip_value_max`.
26245// Any values less than `clip_value_min` are set to `clip_value_min`. Any values
26246// greater than `clip_value_max` are set to `clip_value_max`.
26247//
26248// Arguments:
26249//	t: A `Tensor`.
26250//	clip_value_min: A 0-D (scalar) `Tensor`, or a `Tensor` with the same shape
26251// as `t`. The minimum value to clip by.
26252//	clip_value_max: A 0-D (scalar) `Tensor`, or a `Tensor` with the same shape
26253// as `t`. The maximum value to clip by.
26254//
26255// Returns A clipped `Tensor` with the same shape as input 't'.
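//
// A minimal usage sketch (assuming the `NewScope` and `Const` helpers from
// this package):
//
// ```go
// s := op.NewScope()
// t := op.Const(s, []float32{-1, 0.5, 2})
// clipped := op.ClipByValue(s, t, op.Const(s, float32(0)), op.Const(s, float32(1)))
// // After running in a session, clipped evaluates to [0, 0.5, 1].
// ```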
26256func ClipByValue(scope *Scope, t tf.Output, clip_value_min tf.Output, clip_value_max tf.Output) (output tf.Output) {
26257	if scope.Err() != nil {
26258		return
26259	}
26260	opspec := tf.OpSpec{
26261		Type: "ClipByValue",
26262		Input: []tf.Input{
26263			t, clip_value_min, clip_value_max,
26264		},
26265	}
26266	op := scope.AddOperation(opspec)
26267	return op.Output(0)
26268}
26269
26270// Conv2DBackpropFilterAttr is an optional argument to Conv2DBackpropFilter.
26271type Conv2DBackpropFilterAttr func(optionalAttr)
26272
26273// Conv2DBackpropFilterUseCudnnOnGpu sets the optional use_cudnn_on_gpu attribute to value.
26274// If not specified, defaults to true
26275func Conv2DBackpropFilterUseCudnnOnGpu(value bool) Conv2DBackpropFilterAttr {
26276	return func(m optionalAttr) {
26277		m["use_cudnn_on_gpu"] = value
26278	}
26279}
26280
26281// Conv2DBackpropFilterExplicitPaddings sets the optional explicit_paddings attribute to value.
26282//
26283// value: If `padding` is `"EXPLICIT"`, the list of explicit padding amounts. For the ith
26284// dimension, the amount of padding inserted before and after the dimension is
26285// `explicit_paddings[2 * i]` and `explicit_paddings[2 * i + 1]`, respectively. If
26286// `padding` is not `"EXPLICIT"`, `explicit_paddings` must be empty.
26287// If not specified, defaults to []
26288func Conv2DBackpropFilterExplicitPaddings(value []int64) Conv2DBackpropFilterAttr {
26289	return func(m optionalAttr) {
26290		m["explicit_paddings"] = value
26291	}
26292}
26293
26294// Conv2DBackpropFilterDataFormat sets the optional data_format attribute to value.
26295//
26296// value: Specify the data format of the input and output data. With the
26297// default format "NHWC", the data is stored in the order of:
26298//     [batch, in_height, in_width, in_channels].
26299// Alternatively, the format could be "NCHW", the data storage order of:
26300//     [batch, in_channels, in_height, in_width].
26301// If not specified, defaults to "NHWC"
26302func Conv2DBackpropFilterDataFormat(value string) Conv2DBackpropFilterAttr {
26303	return func(m optionalAttr) {
26304		m["data_format"] = value
26305	}
26306}
26307
26308// Conv2DBackpropFilterDilations sets the optional dilations attribute to value.
26309//
26310// value: 1-D tensor of length 4.  The dilation factor for each dimension of
26311// `input`. If set to k > 1, there will be k-1 skipped cells between each filter
26312// element on that dimension. The dimension order is determined by the value of
26313// `data_format`, see above for details. Dilations in the batch and depth
26314// dimensions must be 1.
26315// If not specified, defaults to [1, 1, 1, 1]
26316func Conv2DBackpropFilterDilations(value []int64) Conv2DBackpropFilterAttr {
26317	return func(m optionalAttr) {
26318		m["dilations"] = value
26319	}
26320}
26321
26322// Computes the gradients of convolution with respect to the filter.
26323//
26324// Arguments:
26325//	input: 4-D with shape `[batch, in_height, in_width, in_channels]`.
26326//	filter_sizes: An integer vector representing the tensor shape of `filter`,
26327// where `filter` is a 4-D
26328// `[filter_height, filter_width, in_channels, out_channels]` tensor.
26329//	out_backprop: 4-D with shape `[batch, out_height, out_width, out_channels]`.
26330// Gradients w.r.t. the output of the convolution.
26331//	strides: The stride of the sliding window for each dimension of the input
26332// of the convolution. Must be in the same order as the dimension specified with
26333// format.
26334//	padding: The type of padding algorithm to use.
26335//
26336// Returns 4-D with shape
26337// `[filter_height, filter_width, in_channels, out_channels]`.  Gradient w.r.t.
26338// the `filter` input of the convolution.
26339func Conv2DBackpropFilter(scope *Scope, input tf.Output, filter_sizes tf.Output, out_backprop tf.Output, strides []int64, padding string, optional ...Conv2DBackpropFilterAttr) (output tf.Output) {
26340	if scope.Err() != nil {
26341		return
26342	}
26343	attrs := map[string]interface{}{"strides": strides, "padding": padding}
26344	for _, a := range optional {
26345		a(attrs)
26346	}
26347	opspec := tf.OpSpec{
26348		Type: "Conv2DBackpropFilter",
26349		Input: []tf.Input{
26350			input, filter_sizes, out_backprop,
26351		},
26352		Attrs: attrs,
26353	}
26354	op := scope.AddOperation(opspec)
26355	return op.Output(0)
26356}
26357
26358// Returns the truth value of x OR y element-wise.
26359//
26360// *NOTE*: `LogicalOr` supports broadcasting. More about broadcasting
26361// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
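//
// For example, `LogicalOr` of `[true, false]` with a scalar `true` broadcasts
// to `[true, true]`.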
26362func LogicalOr(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
26363	if scope.Err() != nil {
26364		return
26365	}
26366	opspec := tf.OpSpec{
26367		Type: "LogicalOr",
26368		Input: []tf.Input{
26369			x, y,
26370		},
26371	}
26372	op := scope.AddOperation(opspec)
26373	return op.Output(0)
26374}
26375
26376// Adds `bias` to `value`.
26377//
26378// This is a deprecated version of BiasAdd and will soon be removed.
26379//
26380// This is a special case of `tf.add` where `bias` is restricted to be 1-D.
26381// Broadcasting is supported, so `value` may have any number of dimensions.
26382//
26383// Arguments:
26384//	value: Any number of dimensions.
26385//	bias: 1-D with size the last dimension of `value`.
26386//
26387// Returns Broadcasted sum of `value` and `bias`.
26388func BiasAddV1(scope *Scope, value tf.Output, bias tf.Output) (output tf.Output) {
26389	if scope.Err() != nil {
26390		return
26391	}
26392	opspec := tf.OpSpec{
26393		Type: "BiasAddV1",
26394		Input: []tf.Input{
26395			value, bias,
26396		},
26397	}
26398	op := scope.AddOperation(opspec)
26399	return op.Output(0)
26400}
26401
26402// Creates a Dataset that returns pseudorandom numbers.
26403//
26404// Creates a Dataset that returns a stream of uniformly distributed
26405// pseudorandom 64-bit signed integers.
26406//
26407// In the TensorFlow Python API, you can instantiate this dataset via the
26408// class `tf.data.experimental.RandomDataset`.
26409//
26410// Instances of this dataset are also created as a result of the
26411// `hoist_random_uniform` static optimization. Whether this optimization is
26412// performed is determined by the `experimental_optimization.hoist_random_uniform`
26413// option of `tf.data.Options`.
26414//
26415// Arguments:
26416//	seed: A scalar seed for the random number generator. If either seed or
26417// seed2 is set to be non-zero, the random number generator is seeded
26418// by the given seed.  Otherwise, a random seed is used.
26419//	seed2: A second scalar seed to avoid seed collision.
26420//
26421//
26422func RandomDataset(scope *Scope, seed tf.Output, seed2 tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
26423	if scope.Err() != nil {
26424		return
26425	}
26426	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
26427	opspec := tf.OpSpec{
26428		Type: "RandomDataset",
26429		Input: []tf.Input{
26430			seed, seed2,
26431		},
26432		Attrs: attrs,
26433	}
26434	op := scope.AddOperation(opspec)
26435	return op.Output(0)
26436}

// FractionalAvgPoolAttr is an optional argument to FractionalAvgPool.
type FractionalAvgPoolAttr func(optionalAttr)

// FractionalAvgPoolPseudoRandom sets the optional pseudo_random attribute to value.
//
// value: When set to True, generates the pooling sequence in a
// pseudorandom fashion, otherwise, in a random fashion. Check the paper [Benjamin
// Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071) for the
// difference between pseudorandom and random.
// If not specified, defaults to false
func FractionalAvgPoolPseudoRandom(value bool) FractionalAvgPoolAttr {
	return func(m optionalAttr) {
		m["pseudo_random"] = value
	}
}

// FractionalAvgPoolOverlapping sets the optional overlapping attribute to value.
//
// value: When set to True, the values at the boundary of adjacent pooling
// cells are used by both cells. For example:
//
// `index  0  1  2  3  4`
//
// `value  20 5  16 3  7`
//
// If the pooling sequence is [0, 2, 4], then 16, at index 2, will be used twice.
// The result would be [41/3, 26/3] for fractional avg pooling.
// If not specified, defaults to false
func FractionalAvgPoolOverlapping(value bool) FractionalAvgPoolAttr {
	return func(m optionalAttr) {
		m["overlapping"] = value
	}
}

// FractionalAvgPoolDeterministic sets the optional deterministic attribute to value.
//
// value: When set to True, a fixed pooling region will be used when
// iterating over a FractionalAvgPool node in the computation graph. Mainly used
// in unit tests to make FractionalAvgPool deterministic.
// If not specified, defaults to false
func FractionalAvgPoolDeterministic(value bool) FractionalAvgPoolAttr {
	return func(m optionalAttr) {
		m["deterministic"] = value
	}
}

// FractionalAvgPoolSeed sets the optional seed attribute to value.
//
// value: If either seed or seed2 is set to be non-zero, the random number
// generator is seeded by the given seed.  Otherwise, it is seeded by a
// random seed.
// If not specified, defaults to 0
func FractionalAvgPoolSeed(value int64) FractionalAvgPoolAttr {
	return func(m optionalAttr) {
		m["seed"] = value
	}
}

// FractionalAvgPoolSeed2 sets the optional seed2 attribute to value.
//
// value: A second seed to avoid seed collision.
// If not specified, defaults to 0
func FractionalAvgPoolSeed2(value int64) FractionalAvgPoolAttr {
	return func(m optionalAttr) {
		m["seed2"] = value
	}
}

// Performs fractional average pooling on the input.
//
// Fractional average pooling is similar to fractional max pooling in the
// pooling-region generation step. The only difference is that, after the
// pooling regions are generated, a mean operation is performed instead of a
// max operation in each pooling region.
//
// Arguments:
//	value: 4-D with shape `[batch, height, width, channels]`.
//	pooling_ratio: Pooling ratio for each dimension of `value`, currently only
// supports row and col dimension and should be >= 1.0. For example, a valid
// pooling ratio looks like [1.0, 1.44, 1.73, 1.0]. The first and last elements
// must be 1.0 because we don't allow pooling on batch and channels
// dimensions. 1.44 and 1.73 are pooling ratios on the height and width dimensions
// respectively.
//
// Returns:
//	output: output tensor after fractional avg pooling.
//	row_pooling_sequence: row pooling sequence, needed to calculate gradient.
//	col_pooling_sequence: column pooling sequence, needed to calculate gradient.
func FractionalAvgPool(scope *Scope, value tf.Output, pooling_ratio []float32, optional ...FractionalAvgPoolAttr) (output tf.Output, row_pooling_sequence tf.Output, col_pooling_sequence tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"pooling_ratio": pooling_ratio}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "FractionalAvgPool",
		Input: []tf.Input{
			value,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}
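
// Example (illustrative sketch, not machine generated): calling
// FractionalAvgPool with the functional-options pattern used by all optional
// attributes in this package. Assumes NewScope and Const from this package.
func exampleFractionalAvgPool() {
	s := NewScope()
	// A 1x4x4x1 input tensor; the pooling_ratio keeps batch and channels at 1.0.
	value := Const(s, [][][][]float32{{
		{{1}, {2}, {3}, {4}},
		{{5}, {6}, {7}, {8}},
		{{9}, {10}, {11}, {12}},
		{{13}, {14}, {15}, {16}},
	}})
	out, rows, cols := FractionalAvgPool(s, value, []float32{1.0, 1.44, 1.73, 1.0},
		FractionalAvgPoolPseudoRandom(true), FractionalAvgPoolSeed(7))
	_, _, _ = out, rows, cols
}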

// MapStageAttr is an optional argument to MapStage.
type MapStageAttr func(optionalAttr)

// MapStageCapacity sets the optional capacity attribute to value.
//
// value: Maximum number of elements in the Staging Area. If > 0, inserts
// on the container will block when the capacity is reached.
// If not specified, defaults to 0
//
// REQUIRES: value >= 0
func MapStageCapacity(value int64) MapStageAttr {
	return func(m optionalAttr) {
		m["capacity"] = value
	}
}

// MapStageMemoryLimit sets the optional memory_limit attribute to value.
// If not specified, defaults to 0
//
// REQUIRES: value >= 0
func MapStageMemoryLimit(value int64) MapStageAttr {
	return func(m optionalAttr) {
		m["memory_limit"] = value
	}
}

// MapStageContainer sets the optional container attribute to value.
//
// value: If non-empty, this queue is placed in the given container. Otherwise,
// a default container is used.
// If not specified, defaults to ""
func MapStageContainer(value string) MapStageAttr {
	return func(m optionalAttr) {
		m["container"] = value
	}
}

// MapStageSharedName sets the optional shared_name attribute to value.
//
// value: This name must match the name used by the corresponding Unstage Op.
// If not specified, defaults to ""
func MapStageSharedName(value string) MapStageAttr {
	return func(m optionalAttr) {
		m["shared_name"] = value
	}
}

// Stage (key, values) in the underlying container, which behaves like a hashtable.
//
// Arguments:
//	key: int64
//
//	values: a list of tensors
//	dtypes: a list of data types that inserted values should adhere to.
//
// Returns the created operation.
func MapStage(scope *Scope, key tf.Output, indices tf.Output, values []tf.Output, dtypes []tf.DataType, optional ...MapStageAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"dtypes": dtypes}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "MapStage",
		Input: []tf.Input{
			key, indices, tf.OutputList(values),
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}
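
// Example (illustrative sketch, not machine generated): staging one float
// vector under an int64 key. Assumes NewScope and Const from this package;
// the indices input selects which of the staged values are provided here.
func exampleMapStage() {
	s := NewScope()
	key := Const(s, int64(1))
	indices := Const(s, []int32{0})
	values := []tf.Output{Const(s, []float32{1, 2, 3})}
	op := MapStage(s, key, indices, values, []tf.DataType{tf.Float},
		MapStageCapacity(8), MapStageSharedName("example_map"))
	_ = op
}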

// SparseTensorDenseMatMulAttr is an optional argument to SparseTensorDenseMatMul.
type SparseTensorDenseMatMulAttr func(optionalAttr)

// SparseTensorDenseMatMulAdjointA sets the optional adjoint_a attribute to value.
//
// value: Use the adjoint of A in the matrix multiply.  If A is complex, this
// is transpose(conj(A)).  Otherwise it's transpose(A).
// If not specified, defaults to false
func SparseTensorDenseMatMulAdjointA(value bool) SparseTensorDenseMatMulAttr {
	return func(m optionalAttr) {
		m["adjoint_a"] = value
	}
}

// SparseTensorDenseMatMulAdjointB sets the optional adjoint_b attribute to value.
//
// value: Use the adjoint of B in the matrix multiply.  If B is complex, this
// is transpose(conj(B)).  Otherwise it's transpose(B).
// If not specified, defaults to false
func SparseTensorDenseMatMulAdjointB(value bool) SparseTensorDenseMatMulAttr {
	return func(m optionalAttr) {
		m["adjoint_b"] = value
	}
}

// Multiply SparseTensor (of rank 2) "A" by dense matrix "B".
//
// No validity checking is performed on the indices of A.  However, the following
// input format is recommended for optimal behavior:
//
// if adjoint_a == false:
//   A should be sorted in lexicographically increasing order.  Use SparseReorder
//   if you're not sure.
// if adjoint_a == true:
//   A should be sorted in order of increasing dimension 1 (i.e., "column major"
//   order instead of "row major" order).
//
// Arguments:
//	a_indices: 2-D.  The `indices` of the `SparseTensor`, size `[nnz, 2]` Matrix.
//	a_values: 1-D.  The `values` of the `SparseTensor`, size `[nnz]` Vector.
//	a_shape: 1-D.  The `shape` of the `SparseTensor`, size `[2]` Vector.
//	b: 2-D.  A dense Matrix.
func SparseTensorDenseMatMul(scope *Scope, a_indices tf.Output, a_values tf.Output, a_shape tf.Output, b tf.Output, optional ...SparseTensorDenseMatMulAttr) (product tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "SparseTensorDenseMatMul",
		Input: []tf.Input{
			a_indices, a_values, a_shape, b,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
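
// Example (illustrative sketch, not machine generated): multiplying a sparse
// 2x3 matrix by a dense 3x2 matrix. Assumes NewScope and Const from this
// package; the sparse operand is given as (indices, values, shape).
func exampleSparseTensorDenseMatMul() {
	s := NewScope()
	// Sparse A: nonzeros at (0,0)=1 and (1,2)=2, dense shape [2, 3].
	aIndices := Const(s, [][]int64{{0, 0}, {1, 2}})
	aValues := Const(s, []float32{1, 2})
	aShape := Const(s, []int64{2, 3})
	b := Const(s, [][]float32{{1, 2}, {3, 4}, {5, 6}})
	product := SparseTensorDenseMatMul(s, aIndices, aValues, aShape, b)
	_ = product
}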

// FusedBatchNormGradV2Attr is an optional argument to FusedBatchNormGradV2.
type FusedBatchNormGradV2Attr func(optionalAttr)

// FusedBatchNormGradV2Epsilon sets the optional epsilon attribute to value.
//
// value: A small float number added to the variance of x.
// If not specified, defaults to 0.0001
func FusedBatchNormGradV2Epsilon(value float32) FusedBatchNormGradV2Attr {
	return func(m optionalAttr) {
		m["epsilon"] = value
	}
}

// FusedBatchNormGradV2DataFormat sets the optional data_format attribute to value.
//
// value: The data format for y_backprop, x, x_backprop.
// Either "NHWC" (default) or "NCHW".
// If not specified, defaults to "NHWC"
func FusedBatchNormGradV2DataFormat(value string) FusedBatchNormGradV2Attr {
	return func(m optionalAttr) {
		m["data_format"] = value
	}
}

// FusedBatchNormGradV2IsTraining sets the optional is_training attribute to value.
//
// value: A bool value to indicate whether the operation is for training (default)
// or inference.
// If not specified, defaults to true
func FusedBatchNormGradV2IsTraining(value bool) FusedBatchNormGradV2Attr {
	return func(m optionalAttr) {
		m["is_training"] = value
	}
}

// Gradient for batch normalization.
//
// Note that the sizes of the 4D Tensors are defined by either "NHWC" or "NCHW".
// The size of the 1D Tensors matches the dimension C of the 4D Tensors.
//
// Arguments:
//	y_backprop: A 4D Tensor for the gradient with respect to y.
//	x: A 4D Tensor for input data.
//	scale: A 1D Tensor for scaling factor, to scale the normalized x.
//	reserve_space_1: When is_training is True, a 1D Tensor for the computed batch
// mean to be reused in gradient computation. When is_training is
// False, a 1D Tensor for the population mean to be reused in both
// 1st and 2nd order gradient computation.
//	reserve_space_2: When is_training is True, a 1D Tensor for the computed batch
// variance (inverted variance in the cuDNN case) to be reused in
// gradient computation. When is_training is False, a 1D Tensor
// for the population variance to be reused in both 1st and 2nd
// order gradient computation.
//
// Returns:
//	x_backprop: A 4D Tensor for the gradient with respect to x.
//	scale_backprop: A 1D Tensor for the gradient with respect to scale.
//	offset_backprop: A 1D Tensor for the gradient with respect to offset.
//	reserve_space_3: Unused placeholder to match the mean input in FusedBatchNorm.
//	reserve_space_4: Unused placeholder to match the variance input
// in FusedBatchNorm.
func FusedBatchNormGradV2(scope *Scope, y_backprop tf.Output, x tf.Output, scale tf.Output, reserve_space_1 tf.Output, reserve_space_2 tf.Output, optional ...FusedBatchNormGradV2Attr) (x_backprop tf.Output, scale_backprop tf.Output, offset_backprop tf.Output, reserve_space_3 tf.Output, reserve_space_4 tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "FusedBatchNormGradV2",
		Input: []tf.Input{
			y_backprop, x, scale, reserve_space_1, reserve_space_2,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2), op.Output(3), op.Output(4)
}

// Computes the gradient for the inverse of `x` wrt its input.
//
// Specifically, `grad = -dy * y*y`, where `y = 1/x`, and `dy`
// is the corresponding input gradient.
func InvGrad(scope *Scope, y tf.Output, dy tf.Output) (z tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "InvGrad",
		Input: []tf.Input{
			y, dy,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
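
// Example (illustrative sketch, not machine generated): for x = 2 we have
// y = 1/x = 0.5, so with an upstream gradient dy = 1 the formula
// grad = -dy * y * y yields -0.25. Assumes NewScope and Const from this package.
func exampleInvGrad() {
	s := NewScope()
	y := Const(s, []float32{0.5}) // y = 1/x for x = 2
	dy := Const(s, []float32{1})  // upstream gradient
	grad := InvGrad(s, y, dy)     // evaluates to [-0.25] when run
	_ = grad
}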

// Adds up a SparseTensor and a dense Tensor, using these special rules:
//
// (1) Broadcasts the dense side to have the same shape as the sparse side, if
//     eligible;
// (2) Then, only the dense values pointed to by the indices of the SparseTensor
//     participate in the cwise addition.
//
// By these rules, the result is a logical SparseTensor with exactly the same
// indices and shape, but possibly with different non-zero values.  The output of
// this Op is the resultant non-zero values.
//
// Arguments:
//	sp_indices: 2-D.  `N x R` matrix with the indices of non-empty values in a
// SparseTensor, possibly not in canonical ordering.
//	sp_values: 1-D.  `N` non-empty values corresponding to `sp_indices`.
//	sp_shape: 1-D.  Shape of the input SparseTensor.
//	dense: `R`-D.  The dense Tensor operand.
//
// Returns 1-D.  The `N` values that are operated on.
func SparseDenseCwiseAdd(scope *Scope, sp_indices tf.Output, sp_values tf.Output, sp_shape tf.Output, dense tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "SparseDenseCwiseAdd",
		Input: []tf.Input{
			sp_indices, sp_values, sp_shape, dense,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Counts the number of occurrences of each value in an integer array.
//
// Outputs a vector with length `size` and the same dtype as `weights`. If
// `weights` are empty, then index `i` stores the number of times the value `i` is
// counted in `arr`. If `weights` are non-empty, then index `i` stores the sum of
// the value in `weights` at each index where the corresponding value in `arr` is
// `i`.
//
// Values in `arr` outside of the range [0, size) are ignored.
//
// Arguments:
//	arr: int32 `Tensor`.
//	size: non-negative int32 scalar `Tensor`.
//	weights: is an int32, int64, float32, or float64 `Tensor` with the same
// shape as `arr`, or a length-0 `Tensor`, in which case it acts as all weights
// equal to 1.
//
// Returns 1D `Tensor` with length equal to `size`. The counts or summed weights for
// each value in the range [0, size).
func Bincount(scope *Scope, arr tf.Output, size tf.Output, weights tf.Output) (bins tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Bincount",
		Input: []tf.Input{
			arr, size, weights,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
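
// Example (illustrative sketch, not machine generated): counting occurrences
// of each value in [1, 1, 2, 4]. Assumes NewScope and Const from this package;
// a length-0 weights tensor means every occurrence counts as 1.
func exampleBincount() {
	s := NewScope()
	arr := Const(s, []int32{1, 1, 2, 4})
	size := Const(s, int32(5))
	weights := Const(s, []float32{}) // empty: all weights treated as 1
	// bins evaluates to [0, 2, 1, 0, 1] when the graph is run.
	bins := Bincount(s, arr, size, weights)
	_ = bins
}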

// Gradients for batch normalization.
//
// DEPRECATED at GraphDef version 9: Use tf.nn.batch_normalization()
//
// This op is deprecated. See `tf.nn.batch_normalization`.
//
// Arguments:
//	t: A 4D input Tensor.
//	m: A 1D mean Tensor with size matching the last dimension of t.
// This is the first output from tf.nn.moments,
// or a saved moving average thereof.
//	v: A 1D variance Tensor with size matching the last dimension of t.
// This is the second output from tf.nn.moments,
// or a saved moving average thereof.
//	gamma: A 1D gamma Tensor with size matching the last dimension of t.
// If "scale_after_normalization" is true, this Tensor will be multiplied
// with the normalized Tensor.
//	backprop: 4D backprop Tensor.
//	variance_epsilon: A small float number to avoid dividing by 0.
//	scale_after_normalization: A bool indicating whether the resulting tensor
// needs to be multiplied with gamma.
//
// Returns:
//	dx: 4D backprop tensor for input.
//	dm: 1D backprop tensor for mean.
//	dv: 1D backprop tensor for variance.
//	db: 1D backprop tensor for beta.
//	dg: 1D backprop tensor for gamma.
func BatchNormWithGlobalNormalizationGrad(scope *Scope, t tf.Output, m tf.Output, v tf.Output, gamma tf.Output, backprop tf.Output, variance_epsilon float32, scale_after_normalization bool) (dx tf.Output, dm tf.Output, dv tf.Output, db tf.Output, dg tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"variance_epsilon": variance_epsilon, "scale_after_normalization": scale_after_normalization}
	opspec := tf.OpSpec{
		Type: "BatchNormWithGlobalNormalizationGrad",
		Input: []tf.Input{
			t, m, v, gamma, backprop,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2), op.Output(3), op.Output(4)
}

// LoadAndRemapMatrixAttr is an optional argument to LoadAndRemapMatrix.
type LoadAndRemapMatrixAttr func(optionalAttr)

// LoadAndRemapMatrixMaxRowsInMemory sets the optional max_rows_in_memory attribute to value.
//
// value: The maximum number of rows to load from the checkpoint at
// once. If less than or equal to 0, the entire matrix will be loaded into
// memory. Setting this arg trades increased disk reads for lower memory usage.
// If not specified, defaults to -1
func LoadAndRemapMatrixMaxRowsInMemory(value int64) LoadAndRemapMatrixAttr {
	return func(m optionalAttr) {
		m["max_rows_in_memory"] = value
	}
}

// Loads a 2-D (matrix) `Tensor` with name `old_tensor_name` from the checkpoint
//
// at `ckpt_path` and potentially reorders its rows and columns using the
// specified remappings.
//
// Most users should use one of the wrapper initializers (such as
// `tf.contrib.framework.load_and_remap_matrix_initializer`) instead of this
// function directly.
//
// The remappings are 1-D tensors with the following properties:
//
// * `row_remapping` must have exactly `num_rows` entries. Row `i` of the output
//   matrix will be initialized from the row corresponding to index
//   `row_remapping[i]` in the old `Tensor` from the checkpoint.
// * `col_remapping` must have either 0 entries (indicating that no column
//   reordering is needed) or `num_cols` entries. If specified, column `j` of the
//   output matrix will be initialized from the column corresponding to index
//   `col_remapping[j]` in the old `Tensor` from the checkpoint.
// * A value of -1 in either of the remappings signifies a "missing" entry. In that
//   case, values from the `initializing_values` tensor will be used to fill that
//   missing row or column. If `row_remapping` has `r` missing entries and
//   `col_remapping` has `c` missing entries, then the following condition must be
//   true:
//
// `(r * num_cols) + (c * num_rows) - (r * c) == len(initializing_values)`
//
// The remapping tensors can be generated using the GenerateVocabRemapping op.
//
// As an example, with row_remapping = [1, 0, -1], col_remapping = [0, 2, -1],
// initializing_values = [0.5, -0.5, 0.25, -0.25, 42], and w(i, j) representing
// the value from row i, column j of the old tensor in the checkpoint, the output
// matrix will look like the following:
//
// [[w(1, 0),  w(1, 2),  0.5],
//  [w(0, 0),  w(0, 2), -0.5],
//  [0.25,    -0.25,      42]]
//
// Arguments:
//	ckpt_path: Path to the TensorFlow checkpoint (version 2, `TensorBundle`) from
// which the old matrix `Tensor` will be loaded.
//	old_tensor_name: Name of the 2-D `Tensor` to load from checkpoint.
//	row_remapping: An int `Tensor` of row remappings (generally created by
// `generate_vocab_remapping`).  Even if no row remapping is needed, this must
// still be an index-valued Tensor (e.g. [0, 1, 2, ...]), or a shifted
// index-valued `Tensor` (e.g. [8, 9, 10, ...], for partitioned `Variables`).
//	col_remapping: An int `Tensor` of column remappings (generally created by
// `generate_vocab_remapping`).  May be a size-0 `Tensor` if only row remapping
// is to be done (e.g. column ordering is the same).
//	initializing_values: A float `Tensor` containing values to fill in for cells
// in the output matrix that are not loaded from the checkpoint. Length must be
// exactly the same as the number of missing / new cells.
//	num_rows: Number of rows (length of the 1st dimension) in the output matrix.
//	num_cols: Number of columns (length of the 2nd dimension) in the output matrix.
//
// Returns Output matrix containing existing values loaded from the
// checkpoint, and with any missing values filled in from initializing_values.
func LoadAndRemapMatrix(scope *Scope, ckpt_path tf.Output, old_tensor_name tf.Output, row_remapping tf.Output, col_remapping tf.Output, initializing_values tf.Output, num_rows int64, num_cols int64, optional ...LoadAndRemapMatrixAttr) (output_matrix tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_rows": num_rows, "num_cols": num_cols}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "LoadAndRemapMatrix",
		Input: []tf.Input{
			ckpt_path, old_tensor_name, row_remapping, col_remapping, initializing_values,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
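
// Example (illustrative sketch, not machine generated): checking the
// initializing_values length condition for the worked example above, where
// row_remapping = [1, 0, -1] has r = 1 missing entry, col_remapping =
// [0, 2, -1] has c = 1, and the output matrix is 3x3.
func exampleLoadAndRemapMatrixSizing() {
	r, c := 1, 1
	numRows, numCols := 3, 3
	// (r * num_cols) + (c * num_rows) - (r * c) = 3 + 3 - 1 = 5, which matches
	// len([0.5, -0.5, 0.25, -0.25, 42]).
	need := r*numCols + c*numRows - r*c
	_ = need // 5
}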

// Does nothing. Only useful as a placeholder for control edges.
//
// Returns the created operation.
func NoOp(scope *Scope) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "NoOp",
	}
	return scope.AddOperation(opspec)
}

// Set a summary_writer_interface to record statistics using the given stats_aggregator.
//
// Returns the created operation.
func StatsAggregatorSetSummaryWriter(scope *Scope, stats_aggregator tf.Output, summary tf.Output) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "StatsAggregatorSetSummaryWriter",
		Input: []tf.Input{
			stats_aggregator, summary,
		},
	}
	return scope.AddOperation(opspec)
}

// Subtracts a value from the current value of a variable.
//
// Any ReadVariableOp with a control dependency on this op is guaranteed to
// see the decremented value or a subsequent newer one.
//
// Arguments:
//	resource: handle to the resource in which to store the variable.
//	value: the value by which the variable will be decremented.
//
// Returns the created operation.
func AssignSubVariableOp(scope *Scope, resource tf.Output, value tf.Output) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "AssignSubVariableOp",
		Input: []tf.Input{
			resource, value,
		},
	}
	return scope.AddOperation(opspec)
}

// FusedBatchNormGradAttr is an optional argument to FusedBatchNormGrad.
type FusedBatchNormGradAttr func(optionalAttr)

// FusedBatchNormGradEpsilon sets the optional epsilon attribute to value.
//
// value: A small float number added to the variance of x.
// If not specified, defaults to 0.0001
func FusedBatchNormGradEpsilon(value float32) FusedBatchNormGradAttr {
	return func(m optionalAttr) {
		m["epsilon"] = value
	}
}

// FusedBatchNormGradDataFormat sets the optional data_format attribute to value.
//
// value: The data format for y_backprop, x, x_backprop.
// Either "NHWC" (default) or "NCHW".
// If not specified, defaults to "NHWC"
func FusedBatchNormGradDataFormat(value string) FusedBatchNormGradAttr {
	return func(m optionalAttr) {
		m["data_format"] = value
	}
}

// FusedBatchNormGradIsTraining sets the optional is_training attribute to value.
//
// value: A bool value to indicate whether the operation is for training (default)
// or inference.
// If not specified, defaults to true
func FusedBatchNormGradIsTraining(value bool) FusedBatchNormGradAttr {
	return func(m optionalAttr) {
		m["is_training"] = value
	}
}

// Gradient for batch normalization.
//
// Note that the sizes of the 4D Tensors are defined by either "NHWC" or "NCHW".
// The size of the 1D Tensors matches the dimension C of the 4D Tensors.
//
// Arguments:
//	y_backprop: A 4D Tensor for the gradient with respect to y.
//	x: A 4D Tensor for input data.
//	scale: A 1D Tensor for scaling factor, to scale the normalized x.
//	reserve_space_1: When is_training is True, a 1D Tensor for the computed batch
// mean to be reused in gradient computation. When is_training is
// False, a 1D Tensor for the population mean to be reused in both
// 1st and 2nd order gradient computation.
//	reserve_space_2: When is_training is True, a 1D Tensor for the computed batch
// variance (inverted variance in the cuDNN case) to be reused in
// gradient computation. When is_training is False, a 1D Tensor
// for the population variance to be reused in both 1st and 2nd
// order gradient computation.
//
// Returns:
//	x_backprop: A 4D Tensor for the gradient with respect to x.
//	scale_backprop: A 1D Tensor for the gradient with respect to scale.
//	offset_backprop: A 1D Tensor for the gradient with respect to offset.
//	reserve_space_3: Unused placeholder to match the mean input in FusedBatchNorm.
//	reserve_space_4: Unused placeholder to match the variance input
// in FusedBatchNorm.
func FusedBatchNormGrad(scope *Scope, y_backprop tf.Output, x tf.Output, scale tf.Output, reserve_space_1 tf.Output, reserve_space_2 tf.Output, optional ...FusedBatchNormGradAttr) (x_backprop tf.Output, scale_backprop tf.Output, offset_backprop tf.Output, reserve_space_3 tf.Output, reserve_space_4 tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "FusedBatchNormGrad",
		Input: []tf.Input{
			y_backprop, x, scale, reserve_space_1, reserve_space_2,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2), op.Output(3), op.Output(4)
}

// An op used by XLA SPMD partitioner to switch from automatic partitioning to
//
// manual partitioning. It annotates the input (full-shape, to be automatically
// partitioned) with the same sharding used by manual partitioning, and outputs a
// shard-shaped tensor to be consumed by later manually-partitioned ops. If the
// shape is not evenly partitionable, the padding region will be masked with 0s.
func XlaSpmdFullToShardShape(scope *Scope, input tf.Output, manual_sharding string) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"manual_sharding": manual_sharding}
	opspec := tf.OpSpec{
		Type: "XlaSpmdFullToShardShape",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// DecodeCSVAttr is an optional argument to DecodeCSV.
type DecodeCSVAttr func(optionalAttr)

// DecodeCSVFieldDelim sets the optional field_delim attribute to value.
//
// value: char delimiter to separate fields in a record.
// If not specified, defaults to ","
func DecodeCSVFieldDelim(value string) DecodeCSVAttr {
	return func(m optionalAttr) {
		m["field_delim"] = value
	}
}

// DecodeCSVUseQuoteDelim sets the optional use_quote_delim attribute to value.
//
// value: If false, treats double quotation marks as regular
// characters inside of the string fields (ignoring RFC 4180, Section 2,
// Bullet 5).
// If not specified, defaults to true
func DecodeCSVUseQuoteDelim(value bool) DecodeCSVAttr {
	return func(m optionalAttr) {
		m["use_quote_delim"] = value
	}
}

// DecodeCSVNaValue sets the optional na_value attribute to value.
//
// value: Additional string to recognize as NA/NaN.
// If not specified, defaults to ""
func DecodeCSVNaValue(value string) DecodeCSVAttr {
	return func(m optionalAttr) {
		m["na_value"] = value
	}
}

// DecodeCSVSelectCols sets the optional select_cols attribute to value.
// If not specified, defaults to <>
func DecodeCSVSelectCols(value []int64) DecodeCSVAttr {
	return func(m optionalAttr) {
		m["select_cols"] = value
	}
}

// Convert CSV records to tensors. Each column maps to one tensor.
//
// RFC 4180 format is expected for the CSV records.
// (https://tools.ietf.org/html/rfc4180)
// Note that we allow leading and trailing spaces in int or float fields.
//
// Arguments:
//	records: Each string is a record/row in the CSV and all records should have
// the same format.
//	record_defaults: One tensor per column of the input record, with either a
// scalar default value for that column or an empty vector if the column is
// required.
//
// Returns Each tensor will have the same shape as records.
func DecodeCSV(scope *Scope, records tf.Output, record_defaults []tf.Output, optional ...DecodeCSVAttr) (output []tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "DecodeCSV",
		Input: []tf.Input{
			records, tf.OutputList(record_defaults),
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	if scope.Err() != nil {
		return
	}
	var idx int
	var err error
	if output, idx, err = makeOutputList(op, idx, "output"); err != nil {
		scope.UpdateErr("DecodeCSV", err)
		return
	}
	return output
}
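
// Example (illustrative sketch, not machine generated): decoding two-column
// CSV records into an int32 and a string tensor. Assumes NewScope and Const
// from this package; record_defaults fixes each column's type and default.
func exampleDecodeCSV() {
	s := NewScope()
	records := Const(s, []string{"1,alice", "2,bob"})
	recordDefaults := []tf.Output{
		Const(s, int32(0)),  // column 0: int32, default 0
		Const(s, "n/a"),     // column 1: string, default "n/a"
	}
	// cols[0] evaluates to [1, 2] and cols[1] to ["alice", "bob"] when run.
	cols := DecodeCSV(s, records, recordDefaults, DecodeCSVFieldDelim(","))
	_ = cols
}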

// Convert JSON-encoded Example records to binary protocol buffer strings.
//
// Note: This is **not** a general purpose JSON parsing op.
//
// This op converts JSON-serialized
// `tf.train.Example` (created with `json_format.MessageToJson`, following the
// [standard JSON mapping](https://developers.google.com/protocol-buffers/docs/proto3#json))
// to a binary-serialized `tf.train.Example` (equivalent to
// `Example.SerializeToString()`) suitable for conversion to tensors with
// `tf.io.parse_example`.
//
// Arguments:
//	json_examples: Each string is a JSON object serialized according to the JSON
// mapping of the Example proto.
//
// Returns Each string is a binary Example protocol buffer corresponding
// to the respective element of `json_examples`.
func DecodeJSONExample(scope *Scope, json_examples tf.Output) (binary_examples tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "DecodeJSONExample",
		Input: []tf.Input{
			json_examples,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// ParseSequenceExampleAttr is an optional argument to ParseSequenceExample.
type ParseSequenceExampleAttr func(optionalAttr)

// ParseSequenceExampleNcontextSparse sets the optional Ncontext_sparse attribute to value.
// If not specified, defaults to 0
//
// REQUIRES: value >= 0
func ParseSequenceExampleNcontextSparse(value int64) ParseSequenceExampleAttr {
	return func(m optionalAttr) {
		m["Ncontext_sparse"] = value
	}
}

// ParseSequenceExampleNcontextDense sets the optional Ncontext_dense attribute to value.
// If not specified, defaults to 0
//
// REQUIRES: value >= 0
func ParseSequenceExampleNcontextDense(value int64) ParseSequenceExampleAttr {
	return func(m optionalAttr) {
		m["Ncontext_dense"] = value
	}
}

// ParseSequenceExampleNfeatureListSparse sets the optional Nfeature_list_sparse attribute to value.
// If not specified, defaults to 0
//
// REQUIRES: value >= 0
func ParseSequenceExampleNfeatureListSparse(value int64) ParseSequenceExampleAttr {
	return func(m optionalAttr) {
		m["Nfeature_list_sparse"] = value
	}
}

// ParseSequenceExampleNfeatureListDense sets the optional Nfeature_list_dense attribute to value.
// If not specified, defaults to 0
//
// REQUIRES: value >= 0
func ParseSequenceExampleNfeatureListDense(value int64) ParseSequenceExampleAttr {
	return func(m optionalAttr) {
		m["Nfeature_list_dense"] = value
	}
}

// ParseSequenceExampleContextSparseTypes sets the optional context_sparse_types attribute to value.
//
// value: A list of Ncontext_sparse types; the data types of data in
// each context Feature given in context_sparse_keys.
// Currently the ParseSingleSequenceExample supports DT_FLOAT (FloatList),
// DT_INT64 (Int64List), and DT_STRING (BytesList).
// If not specified, defaults to <>
//
// REQUIRES: len(value) >= 0
func ParseSequenceExampleContextSparseTypes(value []tf.DataType) ParseSequenceExampleAttr {
	return func(m optionalAttr) {
		m["context_sparse_types"] = value
	}
}

// ParseSequenceExampleFeatureListDenseTypes sets the optional feature_list_dense_types attribute to value.
// If not specified, defaults to <>
//
// REQUIRES: len(value) >= 0
func ParseSequenceExampleFeatureListDenseTypes(value []tf.DataType) ParseSequenceExampleAttr {
	return func(m optionalAttr) {
		m["feature_list_dense_types"] = value
	}
}

// ParseSequenceExampleContextDenseShapes sets the optional context_dense_shapes attribute to value.
//
// value: A list of Ncontext_dense shapes; the shapes of data in
// each context Feature given in context_dense_keys.
// The number of elements in the Feature corresponding to context_dense_key[j]
// must always equal context_dense_shapes[j].NumEntries().
// The shape of context_dense_values[j] will match context_dense_shapes[j].
// If not specified, defaults to <>
//
// REQUIRES: len(value) >= 0
func ParseSequenceExampleContextDenseShapes(value []tf.Shape) ParseSequenceExampleAttr {
	return func(m optionalAttr) {
		m["context_dense_shapes"] = value
	}
}

// ParseSequenceExampleFeatureListSparseTypes sets the optional feature_list_sparse_types attribute to value.
//
// value: A list of Nfeature_list_sparse types; the data types
// of data in each FeatureList given in feature_list_sparse_keys.
// Currently the ParseSingleSequenceExample supports DT_FLOAT (FloatList),
// DT_INT64 (Int64List), and DT_STRING (BytesList).
// If not specified, defaults to <>
//
// REQUIRES: len(value) >= 0
func ParseSequenceExampleFeatureListSparseTypes(value []tf.DataType) ParseSequenceExampleAttr {
	return func(m optionalAttr) {
		m["feature_list_sparse_types"] = value
	}
}

// ParseSequenceExampleFeatureListDenseShapes sets the optional feature_list_dense_shapes attribute to value.
//
// value: A list of Nfeature_list_dense shapes; the shapes of
// data in each FeatureList given in feature_list_dense_keys.
// The shape of each Feature in the FeatureList corresponding to
// feature_list_dense_key[j] must always equal
// feature_list_dense_shapes[j].NumEntries().
// If not specified, defaults to <>
//
// REQUIRES: len(value) >= 0
func ParseSequenceExampleFeatureListDenseShapes(value []tf.Shape) ParseSequenceExampleAttr {
	return func(m optionalAttr) {
		m["feature_list_dense_shapes"] = value
	}
}

// Transforms a vector of brain.SequenceExample protos (as strings) into typed tensors.
//
// Arguments:
//	serialized: A vector containing binary serialized SequenceExample protos.
//	debug_name: A vector containing the names of the serialized protos.
// May contain, for example, the table key (descriptive) name for the
// corresponding serialized proto.  This is useful purely for debugging
// purposes, and the presence of values here has no effect on the output.
// May also be an empty vector if no name is available.
//	context_dense_defaults: A list of Ncontext_dense Tensors (some may be empty).
// context_dense_defaults[j] provides default values
// when the SequenceExample's context map lacks context_dense_key[j].
// If an empty Tensor is provided for context_dense_defaults[j],
// then the Feature context_dense_keys[j] is required.
// The input type is inferred from context_dense_defaults[j], even when it's
// empty.  If context_dense_defaults[j] is not empty, its shape must match
// context_dense_shapes[j].
//	feature_list_dense_missing_assumed_empty: A vector listing the
// FeatureList keys which may be missing from the SequenceExamples.  If the
// associated FeatureList is missing, it is treated as empty.  By default,
// any FeatureList not listed in this vector must exist in the SequenceExamples.
//	context_sparse_keys: A list of Ncontext_sparse string Tensors (scalars).
// The keys expected in the Examples' features associated with context_sparse
// values.
//	context_dense_keys: A list of Ncontext_dense string Tensors (scalars).
// The keys expected in the SequenceExamples' context features associated with
// dense values.
//	feature_list_sparse_keys: A list of Nfeature_list_sparse string Tensors
// (scalars).  The keys expected in the FeatureLists associated with sparse
// values.
//	feature_list_dense_keys: A list of Nfeature_list_dense string Tensors (scalars).
// The keys expected in the SequenceExamples' feature_lists associated
// with lists of dense values.
func ParseSequenceExample(scope *Scope, serialized tf.Output, debug_name tf.Output, context_dense_defaults []tf.Output, feature_list_dense_missing_assumed_empty []string, context_sparse_keys []string, context_dense_keys []string, feature_list_sparse_keys []string, feature_list_dense_keys []string, optional ...ParseSequenceExampleAttr) (context_sparse_indices []tf.Output, context_sparse_values []tf.Output, context_sparse_shapes []tf.Output, context_dense_values []tf.Output, feature_list_sparse_indices []tf.Output, feature_list_sparse_values []tf.Output, feature_list_sparse_shapes []tf.Output, feature_list_dense_values []tf.Output, feature_list_dense_lengths []tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"feature_list_dense_missing_assumed_empty": feature_list_dense_missing_assumed_empty, "context_sparse_keys": context_sparse_keys, "context_dense_keys": context_dense_keys, "feature_list_sparse_keys": feature_list_sparse_keys, "feature_list_dense_keys": feature_list_dense_keys}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ParseSequenceExample",
		Input: []tf.Input{
			serialized, debug_name, tf.OutputList(context_dense_defaults),
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	if scope.Err() != nil {
		return
	}
	var idx int
	var err error
	if context_sparse_indices, idx, err = makeOutputList(op, idx, "context_sparse_indices"); err != nil {
		scope.UpdateErr("ParseSequenceExample", err)
		return
	}
	if context_sparse_values, idx, err = makeOutputList(op, idx, "context_sparse_values"); err != nil {
		scope.UpdateErr("ParseSequenceExample", err)
		return
	}
	if context_sparse_shapes, idx, err = makeOutputList(op, idx, "context_sparse_shapes"); err != nil {
		scope.UpdateErr("ParseSequenceExample", err)
		return
	}
	if context_dense_values, idx, err = makeOutputList(op, idx, "context_dense_values"); err != nil {
		scope.UpdateErr("ParseSequenceExample", err)
		return
	}
	if feature_list_sparse_indices, idx, err = makeOutputList(op, idx, "feature_list_sparse_indices"); err != nil {
		scope.UpdateErr("ParseSequenceExample", err)
		return
	}
	if feature_list_sparse_values, idx, err = makeOutputList(op, idx, "feature_list_sparse_values"); err != nil {
		scope.UpdateErr("ParseSequenceExample", err)
		return
	}
	if feature_list_sparse_shapes, idx, err = makeOutputList(op, idx, "feature_list_sparse_shapes"); err != nil {
		scope.UpdateErr("ParseSequenceExample", err)
		return
	}
	if feature_list_dense_values, idx, err = makeOutputList(op, idx, "feature_list_dense_values"); err != nil {
		scope.UpdateErr("ParseSequenceExample", err)
		return
	}
	if feature_list_dense_lengths, idx, err = makeOutputList(op, idx, "feature_list_dense_lengths"); err != nil {
		scope.UpdateErr("ParseSequenceExample", err)
		return
	}
	return context_sparse_indices, context_sparse_values, context_sparse_shapes, context_dense_values, feature_list_sparse_indices, feature_list_sparse_values, feature_list_sparse_shapes, feature_list_dense_values, feature_list_dense_lengths
}

// UniqueV2Attr is an optional argument to UniqueV2.
type UniqueV2Attr func(optionalAttr)

// UniqueV2OutIdx sets the optional out_idx attribute to value.
// If not specified, defaults to DT_INT32
func UniqueV2OutIdx(value tf.DataType) UniqueV2Attr {
	return func(m optionalAttr) {
		m["out_idx"] = value
	}
}

// Finds unique elements along an axis of a tensor.
//
// This operation returns a tensor `y` containing the unique elements
// along the `axis` of a tensor. The returned unique elements are sorted
// in the same order as they occur along `axis` in `x`.
// This operation also returns a tensor `idx` that is the same size as
// the number of elements in `x` along the `axis` dimension. It
// contains the index of each element of `x` in the unique output `y`.
// In other words, for a `1-D` tensor `x` with `axis = None`:
//
// `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]`
//
// For example:
//
// ```
// # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]
// y, idx = unique(x)
// y ==> [1, 2, 4, 7, 8]
// idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]
// ```
//
// For a `2-D` tensor `x` with `axis = 0`:
//
// ```
// # tensor 'x' is [[1, 0, 0],
// #                [1, 0, 0],
// #                [2, 0, 0]]
// y, idx = unique(x, axis=0)
// y ==> [[1, 0, 0],
//        [2, 0, 0]]
// idx ==> [0, 0, 1]
// ```
//
// For a `2-D` tensor `x` with `axis = 1`:
//
// ```
// # tensor 'x' is [[1, 0, 0],
// #                [1, 0, 0],
// #                [2, 0, 0]]
// y, idx = unique(x, axis=1)
// y ==> [[1, 0],
//        [1, 0],
//        [2, 0]]
// idx ==> [0, 1, 1]
// ```
//
// Arguments:
//	x: A `Tensor`.
//	axis: A `Tensor` of type `int32` (default: None). The axis of the Tensor to
// find the unique elements.
//
// Returns:
//	y: A `Tensor`. Unique elements along the `axis` of `Tensor` x.
//	idx: A 1-D Tensor of type `out_idx` that contains the index of each
// value of x in the output y.
func UniqueV2(scope *Scope, x tf.Output, axis tf.Output, optional ...UniqueV2Attr) (y tf.Output, idx tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "UniqueV2",
		Input: []tf.Input{
			x, axis,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1)
}
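
// Example (illustrative sketch, not machine generated): finding the unique
// elements of a 1-D tensor, mirroring the first example above. Assumes
// NewScope and Const from this package.
func exampleUniqueV2() {
	s := NewScope()
	x := Const(s, []int32{1, 1, 2, 4, 4, 4, 7, 8, 8})
	axis := Const(s, []int32{0})
	// y evaluates to [1, 2, 4, 7, 8] and idx to [0, 0, 1, 2, 2, 2, 3, 4, 4].
	y, idx := UniqueV2(s, x, axis)
	_, _ = y, idx
}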

// RetrieveTPUEmbeddingADAMParametersAttr is an optional argument to RetrieveTPUEmbeddingADAMParameters.
type RetrieveTPUEmbeddingADAMParametersAttr func(optionalAttr)

// RetrieveTPUEmbeddingADAMParametersTableId sets the optional table_id attribute to value.
// If not specified, defaults to -1
func RetrieveTPUEmbeddingADAMParametersTableId(value int64) RetrieveTPUEmbeddingADAMParametersAttr {
	return func(m optionalAttr) {
		m["table_id"] = value
	}
}

// RetrieveTPUEmbeddingADAMParametersTableName sets the optional table_name attribute to value.
// If not specified, defaults to ""
func RetrieveTPUEmbeddingADAMParametersTableName(value string) RetrieveTPUEmbeddingADAMParametersAttr {
	return func(m optionalAttr) {
		m["table_name"] = value
	}
}

// RetrieveTPUEmbeddingADAMParametersConfig sets the optional config attribute to value.
// If not specified, defaults to ""
func RetrieveTPUEmbeddingADAMParametersConfig(value string) RetrieveTPUEmbeddingADAMParametersAttr {
	return func(m optionalAttr) {
		m["config"] = value
	}
}

// Retrieve ADAM embedding parameters.
//
// An op that retrieves optimization parameters from embedding to host
// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
// the correct embedding table configuration. For example, this op is
// used to retrieve updated parameters before saving a checkpoint.
//
// Returns:
//	parameters: Parameter parameters updated by the ADAM optimization algorithm.
//	momenta: Parameter momenta updated by the ADAM optimization algorithm.
//	velocities: Parameter velocities updated by the ADAM optimization algorithm.
func RetrieveTPUEmbeddingADAMParameters(scope *Scope, num_shards int64, shard_id int64, optional ...RetrieveTPUEmbeddingADAMParametersAttr) (parameters tf.Output, momenta tf.Output, velocities tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "RetrieveTPUEmbeddingADAMParameters",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}

// StatelessRandomBinomialAttr is an optional argument to StatelessRandomBinomial.
type StatelessRandomBinomialAttr func(optionalAttr)

// StatelessRandomBinomialDtype sets the optional dtype attribute to value.
//
// value: The type of the output.
// If not specified, defaults to DT_INT64
func StatelessRandomBinomialDtype(value tf.DataType) StatelessRandomBinomialAttr {
	return func(m optionalAttr) {
		m["dtype"] = value
	}
}

// Outputs deterministic pseudorandom numbers from a binomial distribution.
//
// Outputs random values from a binomial distribution.
//
// The outputs are a deterministic function of `shape`, `seed`, `counts`, and `probs`.
//
// Arguments:
//	shape: The shape of the output tensor.
//	seed: 2 seeds (shape [2]).
//	counts: The counts of the binomial distribution. Must be broadcastable with `probs`,
// and broadcastable with the rightmost dimensions of `shape`.
//	probs: The probability of success for the binomial distribution. Must be broadcastable
// with `counts` and broadcastable with the rightmost dimensions of `shape`.
//
// Returns Random values with specified shape.
func StatelessRandomBinomial(scope *Scope, shape tf.Output, seed tf.Output, counts tf.Output, probs tf.Output, optional ...StatelessRandomBinomialAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "StatelessRandomBinomial",
		Input: []tf.Input{
			shape, seed, counts, probs,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// DecodePaddedRawAttr is an optional argument to DecodePaddedRaw.
type DecodePaddedRawAttr func(optionalAttr)

// DecodePaddedRawLittleEndian sets the optional little_endian attribute to value.
//
// value: Whether the input `input_bytes` is in little-endian order. Ignored for
// `out_type` values that are stored in a single byte, like `uint8`.
// If not specified, defaults to true
func DecodePaddedRawLittleEndian(value bool) DecodePaddedRawAttr {
	return func(m optionalAttr) {
		m["little_endian"] = value
	}
}

// Reinterpret the bytes of a string as a vector of numbers.
//
// Arguments:
//	input_bytes: Tensor of strings to be decoded.
//	fixed_length: Length in bytes for each element of the decoded output. Must be a multiple
// of the size of the output type.
//
//
// Returns A Tensor with one more dimension than the input `bytes`. The added dimension
// will have size equal to the length of the elements of `bytes` divided by the
// number of bytes to represent `out_type`.
func DecodePaddedRaw(scope *Scope, input_bytes tf.Output, fixed_length tf.Output, out_type tf.DataType, optional ...DecodePaddedRawAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"out_type": out_type}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "DecodePaddedRaw",
		Input: []tf.Input{
			input_bytes, fixed_length,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Elementwise computes the bitwise left-shift of `x` and `y`.
//
// If `y` is negative, or greater than or equal to the width of `x` in bits, the
// result is implementation defined.
//
// Example:
//
// ```python
// import tensorflow as tf
// from tensorflow.python.ops import bitwise_ops
// import numpy as np
// dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64]
//
// for dtype in dtype_list:
//   lhs = tf.constant([-1, -5, -3, -14], dtype=dtype)
//   rhs = tf.constant([5, 0, 7, 11], dtype=dtype)
//
//   left_shift_result = bitwise_ops.left_shift(lhs, rhs)
//
//   print(left_shift_result)
//
// # This will print:
// # tf.Tensor([ -32   -5 -128    0], shape=(4,), dtype=int8)
// # tf.Tensor([   -32     -5   -384 -28672], shape=(4,), dtype=int16)
// # tf.Tensor([   -32     -5   -384 -28672], shape=(4,), dtype=int32)
// # tf.Tensor([   -32     -5   -384 -28672], shape=(4,), dtype=int64)
//
// lhs = np.array([-2, 64, 101, 32], dtype=np.int8)
// rhs = np.array([-1, -5, -3, -14], dtype=np.int8)
// bitwise_ops.left_shift(lhs, rhs)
// # <tf.Tensor: shape=(4,), dtype=int8, numpy=array([ -2,  64, 101,  32], dtype=int8)>
// ```
//
func LeftShift(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "LeftShift",
		Input: []tf.Input{
			x, y,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
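
// Example (illustrative sketch, not machine generated): the Go-side analogue
// of the first Python snippet above, shifting int8 values. Assumes NewScope
// and Const from this package.
func exampleLeftShift() {
	s := NewScope()
	lhs := Const(s, []int8{-1, -5, -3, -14})
	rhs := Const(s, []int8{5, 0, 7, 11})
	// Evaluates to [-32, -5, -128, 0], matching the int8 row printed above.
	z := LeftShift(s, lhs, rhs)
	_ = z
}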

// Generates a feature cross from a list of tensors and returns it as a
// RaggedTensor.  See `tf.ragged.cross` for more details.
//
// Arguments:
//	ragged_values: The values tensor for each RaggedTensor input.
//	ragged_row_splits: The row_splits tensor for each RaggedTensor input.
//	sparse_indices: The indices tensor for each SparseTensor input.
//	sparse_values: The values tensor for each SparseTensor input.
//	sparse_shape: The dense_shape tensor for each SparseTensor input.
//	dense_inputs: The tf.Tensor inputs.
//	input_order: String specifying the tensor type for each input.  The `i`th character in
// this string specifies the type of the `i`th input, and is one of: 'R' (ragged),
// 'D' (dense), or 'S' (sparse).  This attr is used to ensure that the crossed
// values are combined in the order of the inputs from the call to tf.ragged.cross.
//
// Returns:
//	output_values: The `values` for the returned `RaggedTensor`.
//	output_row_splits: The `row_splits` for the returned `RaggedTensor`.
func RaggedCross(scope *Scope, ragged_values []tf.Output, ragged_row_splits []tf.Output, sparse_indices []tf.Output, sparse_values []tf.Output, sparse_shape []tf.Output, dense_inputs []tf.Output, input_order string, hashed_output bool, num_buckets int64, hash_key int64, out_values_type tf.DataType, out_row_splits_type tf.DataType) (output_values tf.Output, output_row_splits tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"input_order": input_order, "hashed_output": hashed_output, "num_buckets": num_buckets, "hash_key": hash_key, "out_values_type": out_values_type, "out_row_splits_type": out_row_splits_type}
	opspec := tf.OpSpec{
		Type: "RaggedCross",
		Input: []tf.Input{
			tf.OutputList(ragged_values), tf.OutputList(ragged_row_splits), tf.OutputList(sparse_indices), tf.OutputList(sparse_values), tf.OutputList(sparse_shape), tf.OutputList(dense_inputs),
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1)
}

// BatchMatMulAttr is an optional argument to BatchMatMul.
type BatchMatMulAttr func(optionalAttr)

// BatchMatMulAdjX sets the optional adj_x attribute to value.
//
// value: If `True`, adjoint the slices of `x`. Defaults to `False`.
// If not specified, defaults to false
func BatchMatMulAdjX(value bool) BatchMatMulAttr {
	return func(m optionalAttr) {
		m["adj_x"] = value
	}
}

// BatchMatMulAdjY sets the optional adj_y attribute to value.
//
// value: If `True`, adjoint the slices of `y`. Defaults to `False`.
// If not specified, defaults to false
func BatchMatMulAdjY(value bool) BatchMatMulAttr {
	return func(m optionalAttr) {
		m["adj_y"] = value
	}
}

// Multiplies slices of two tensors in batches.
//
// Multiplies all slices of `Tensor` `x` and `y` (each slice can be
// viewed as an element of a batch), and arranges the individual results
// in a single output tensor of the same batch size. Each of the
// individual slices can optionally be adjointed (to adjoint a matrix
// means to transpose and conjugate it) before multiplication by setting
// the `adj_x` or `adj_y` flag to `True`, which are by default `False`.
//
// The input tensors `x` and `y` are 2-D or higher with shape `[..., r_x, c_x]`
// and `[..., r_y, c_y]`.
//
// The output tensor is 2-D or higher with shape `[..., r_o, c_o]`, where:
//
//     r_o = c_x if adj_x else r_x
//     c_o = r_y if adj_y else c_y
//
// It is computed as:
//
//     output[..., :, :] = matrix(x[..., :, :]) * matrix(y[..., :, :])
//
// Arguments:
//	x: 2-D or higher with shape `[..., r_x, c_x]`.
//	y: 2-D or higher with shape `[..., r_y, c_y]`.
//
// Returns 2-D or higher with shape `[..., r_o, c_o]`.
func BatchMatMul(scope *Scope, x tf.Output, y tf.Output, optional ...BatchMatMulAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "BatchMatMul",
		Input: []tf.Input{
			x, y,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
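
// exampleBatchMatMul is a hand-written usage sketch, not generated code. It
// multiplies a batch of two 2x3 matrices by a batch of two 3x2 matrices; with
// BatchMatMulAdjX(true) the slices of x would be adjointed first.
func exampleBatchMatMul() tf.Output {
	s := NewScope()
	x := Const(s, [][][]float32{{{1, 2, 3}, {4, 5, 6}}, {{1, 0, 0}, {0, 1, 0}}})
	y := Const(s, [][][]float32{{{1, 0}, {0, 1}, {1, 1}}, {{2, 0}, {0, 2}, {0, 0}}})
	// Output shape is [2, 2, 2]: r_o = r_x and c_o = c_y since both adj flags
	// default to false.
	return BatchMatMul(s, x, y)
}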

// RaggedTensorFromVariantAttr is an optional argument to RaggedTensorFromVariant.
type RaggedTensorFromVariantAttr func(optionalAttr)

// RaggedTensorFromVariantTsplits sets the optional Tsplits attribute to value.
// If not specified, defaults to DT_INT64
func RaggedTensorFromVariantTsplits(value tf.DataType) RaggedTensorFromVariantAttr {
	return func(m optionalAttr) {
		m["Tsplits"] = value
	}
}

// Decodes a `variant` Tensor into a `RaggedTensor`.
//
// Decodes the given `variant` Tensor and returns a `RaggedTensor`. The input
// could be a scalar, meaning it encodes a single `RaggedTensor` with ragged_rank
// `output_ragged_rank`. It could also have an arbitrary rank, in which case each
// element is decoded into a `RaggedTensor` with ragged_rank `input_ragged_rank`
// and these are then stacked according to the input shape to output a single
// `RaggedTensor` with ragged_rank `output_ragged_rank`. Each `variant` element in
// the input Tensor is decoded by retrieving from the element a 1-D `variant`
// Tensor with `input_ragged_rank + 1` Tensors, corresponding to the splits and
// values of the decoded `RaggedTensor`. If `input_ragged_rank` is -1, then it is
// inferred as `output_ragged_rank` - `rank(encoded_ragged)`. See
// `RaggedTensorToVariant` for the corresponding encoding logic.
//
// Arguments:
//	encoded_ragged: A `variant` Tensor containing encoded `RaggedTensor`s.
//	input_ragged_rank: The ragged rank of each encoded `RaggedTensor` component in the input. If set to
// -1, this is inferred as `output_ragged_rank` - `rank(encoded_ragged)`.
//	output_ragged_rank: The expected ragged rank of the output `RaggedTensor`. The following must hold:
// `output_ragged_rank = rank(encoded_ragged) + input_ragged_rank`.
//
// Returns:
//	output_nested_splits: A list of one or more Tensors representing the splits of the output
// `RaggedTensor`.
//	output_dense_values: A Tensor representing the values of the output `RaggedTensor`.
func RaggedTensorFromVariant(scope *Scope, encoded_ragged tf.Output, input_ragged_rank int64, output_ragged_rank int64, Tvalues tf.DataType, optional ...RaggedTensorFromVariantAttr) (output_nested_splits []tf.Output, output_dense_values tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"input_ragged_rank": input_ragged_rank, "output_ragged_rank": output_ragged_rank, "Tvalues": Tvalues}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "RaggedTensorFromVariant",
		Input: []tf.Input{
			encoded_ragged,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	if scope.Err() != nil {
		return
	}
	var idx int
	var err error
	if output_nested_splits, idx, err = makeOutputList(op, idx, "output_nested_splits"); err != nil {
		scope.UpdateErr("RaggedTensorFromVariant", err)
		return
	}
	output_dense_values = op.Output(idx)
	return output_nested_splits, output_dense_values
}

// Returns the name of the device on which `resource` has been placed.
func ExperimentalIteratorGetDevice(scope *Scope, resource tf.Output) (device tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "ExperimentalIteratorGetDevice",
		Input: []tf.Input{
			resource,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Records the bytes size of each element of `input_dataset` in a StatsAggregator.
func ExperimentalBytesProducedStatsDataset(scope *Scope, input_dataset tf.Output, tag tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
	opspec := tf.OpSpec{
		Type: "ExperimentalBytesProducedStatsDataset",
		Input: []tf.Input{
			input_dataset, tag,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Computes exponential linear: `exp(features) - 1` if `features < 0`, `features` otherwise.
//
// See [Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs)
// ](http://arxiv.org/abs/1511.07289)
func Elu(scope *Scope, features tf.Output) (activations tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Elu",
		Input: []tf.Input{
			features,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// AddSparseToTensorsMapAttr is an optional argument to AddSparseToTensorsMap.
type AddSparseToTensorsMapAttr func(optionalAttr)

// AddSparseToTensorsMapContainer sets the optional container attribute to value.
//
// value: The container name for the `SparseTensorsMap` created by this op.
// If not specified, defaults to ""
func AddSparseToTensorsMapContainer(value string) AddSparseToTensorsMapAttr {
	return func(m optionalAttr) {
		m["container"] = value
	}
}

// AddSparseToTensorsMapSharedName sets the optional shared_name attribute to value.
//
// value: The shared name for the `SparseTensorsMap` created by this op.
// If blank, the new Operation's unique name is used.
// If not specified, defaults to ""
func AddSparseToTensorsMapSharedName(value string) AddSparseToTensorsMapAttr {
	return func(m optionalAttr) {
		m["shared_name"] = value
	}
}

// Add a `SparseTensor` to a `SparseTensorsMap` and return its handle.
//
// A `SparseTensor` is represented by three tensors: `sparse_indices`,
// `sparse_values`, and `sparse_shape`.
//
// This operator takes the given `SparseTensor` and adds it to a container
// object (a `SparseTensorsMap`).  A unique key within this container is generated
// in the form of an `int64`, and this is the value that is returned.
//
// The `SparseTensor` can then be read out as part of a minibatch by passing
// the key as a vector element to `TakeManySparseFromTensorsMap`.  To ensure
// the correct `SparseTensorsMap` is accessed, ensure that the same
// `container` and `shared_name` are passed to that Op.  If no `shared_name`
// is provided here, instead use the *name* of the Operation created by calling
// `AddSparseToTensorsMap` as the `shared_name` passed to
// `TakeManySparseFromTensorsMap`.  Ensure the Operations are colocated.
//
// Arguments:
//	sparse_indices: 2-D.  The `indices` of the `SparseTensor`.
//	sparse_values: 1-D.  The `values` of the `SparseTensor`.
//	sparse_shape: 1-D.  The `shape` of the `SparseTensor`.
//
// Returns 0-D.  The handle of the `SparseTensor` now stored in the
// `SparseTensorsMap`.
func AddSparseToTensorsMap(scope *Scope, sparse_indices tf.Output, sparse_values tf.Output, sparse_shape tf.Output, optional ...AddSparseToTensorsMapAttr) (sparse_handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "AddSparseToTensorsMap",
		Input: []tf.Input{
			sparse_indices, sparse_values, sparse_shape,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
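
// exampleAddSparseToTensorsMap is a hand-written usage sketch, not generated
// code. It stores a 3x4 SparseTensor under an explicit shared_name (the name
// "sparse_map_0" is illustrative) so that a later TakeManySparseFromTensorsMap
// with the same container/shared_name can read it back.
func exampleAddSparseToTensorsMap() tf.Output {
	s := NewScope()
	indices := Const(s, [][]int64{{0, 0}, {1, 2}})
	values := Const(s, []int64{7, 9})
	shape := Const(s, []int64{3, 4})
	return AddSparseToTensorsMap(s, indices, values, shape,
		AddSparseToTensorsMapSharedName("sparse_map_0"))
}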

// Transforms a vector of tf.Example protos (as strings) into typed tensors.
//
// Arguments:
//	serialized: A scalar or vector containing binary serialized Example protos.
//	names: A tensor containing the names of the serialized protos.
// Corresponds 1:1 with the `serialized` tensor.
// May contain, for example, table key (descriptive) names for the
// corresponding serialized protos.  These are purely useful for debugging
// purposes, and the presence of values here has no effect on the output.
// May also be an empty vector if no names are available.
// If non-empty, this tensor must have the same shape as "serialized".
//	sparse_keys: Vector of strings.
// The keys expected in the Examples' features associated with sparse values.
//	dense_keys: Vector of strings.
// The keys expected in the Examples' features associated with dense values.
//	ragged_keys: Vector of strings.
// The keys expected in the Examples' features associated with ragged values.
//	dense_defaults: A list of Tensors (some may be empty).  Corresponds 1:1 with `dense_keys`.
// dense_defaults[j] provides default values
// when the example's feature_map lacks dense_key[j].  If an empty Tensor is
// provided for dense_defaults[j], then the Feature dense_keys[j] is required.
// The input type is inferred from dense_defaults[j], even when it's empty.
// If dense_defaults[j] is not empty, and dense_shapes[j] is fully defined,
// then the shape of dense_defaults[j] must match that of dense_shapes[j].
// If dense_shapes[j] has an undefined major dimension (variable strides dense
// feature), dense_defaults[j] must contain a single element:
// the padding element.
//	num_sparse: The number of sparse keys.
//	sparse_types: A list of `num_sparse` types; the data types of data in each Feature
// given in sparse_keys.
// Currently ParseExampleV2 supports DT_FLOAT (FloatList),
// DT_INT64 (Int64List), and DT_STRING (BytesList).
//	ragged_value_types: A list of `num_ragged` types; the data types of data in each Feature
// given in ragged_keys (where `num_ragged = ragged_keys.size()`).
// Currently ParseExampleV2 supports DT_FLOAT (FloatList),
// DT_INT64 (Int64List), and DT_STRING (BytesList).
//	ragged_split_types: A list of `num_ragged` types; the data types of row_splits in each Feature
// given in ragged_keys (where `num_ragged = ragged_keys.size()`).
// May be DT_INT32 or DT_INT64.
//	dense_shapes: A list of `num_dense` shapes; the shapes of data in each Feature
// given in dense_keys (where `num_dense = dense_keys.size()`).
// The number of elements in the Feature corresponding to dense_key[j]
// must always equal dense_shapes[j].NumEntries().
// If dense_shapes[j] == (D0, D1, ..., DN) then the shape of output
// Tensor dense_values[j] will be (|serialized|, D0, D1, ..., DN):
// The dense outputs are just the inputs row-stacked by batch.
// This works for dense_shapes[j] = (-1, D1, ..., DN).  In this case
// the shape of the output Tensor dense_values[j] will be
// (|serialized|, M, D1, ..., DN), where M is the maximum number of blocks
// of elements of length D1 * ... * DN, across all minibatch entries
// in the input.  Any minibatch entry with fewer than M blocks of elements of
// length D1 * ... * DN will be padded with the corresponding default_value
// scalar element along the second dimension.
func ParseExampleV2(scope *Scope, serialized tf.Output, names tf.Output, sparse_keys tf.Output, dense_keys tf.Output, ragged_keys tf.Output, dense_defaults []tf.Output, num_sparse int64, sparse_types []tf.DataType, ragged_value_types []tf.DataType, ragged_split_types []tf.DataType, dense_shapes []tf.Shape) (sparse_indices []tf.Output, sparse_values []tf.Output, sparse_shapes []tf.Output, dense_values []tf.Output, ragged_values []tf.Output, ragged_row_splits []tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_sparse": num_sparse, "sparse_types": sparse_types, "ragged_value_types": ragged_value_types, "ragged_split_types": ragged_split_types, "dense_shapes": dense_shapes}
	opspec := tf.OpSpec{
		Type: "ParseExampleV2",
		Input: []tf.Input{
			serialized, names, sparse_keys, dense_keys, ragged_keys, tf.OutputList(dense_defaults),
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	if scope.Err() != nil {
		return
	}
	var idx int
	var err error
	if sparse_indices, idx, err = makeOutputList(op, idx, "sparse_indices"); err != nil {
		scope.UpdateErr("ParseExampleV2", err)
		return
	}
	if sparse_values, idx, err = makeOutputList(op, idx, "sparse_values"); err != nil {
		scope.UpdateErr("ParseExampleV2", err)
		return
	}
	if sparse_shapes, idx, err = makeOutputList(op, idx, "sparse_shapes"); err != nil {
		scope.UpdateErr("ParseExampleV2", err)
		return
	}
	if dense_values, idx, err = makeOutputList(op, idx, "dense_values"); err != nil {
		scope.UpdateErr("ParseExampleV2", err)
		return
	}
	if ragged_values, idx, err = makeOutputList(op, idx, "ragged_values"); err != nil {
		scope.UpdateErr("ParseExampleV2", err)
		return
	}
	if ragged_row_splits, idx, err = makeOutputList(op, idx, "ragged_row_splits"); err != nil {
		scope.UpdateErr("ParseExampleV2", err)
		return
	}
	return sparse_indices, sparse_values, sparse_shapes, dense_values, ragged_values, ragged_row_splits
}

// Scatter `updates` into a new tensor according to `indices`.
//
// Creates a new tensor by applying sparse `updates` to individual values or
// slices within a tensor (initially zero for numeric, empty for string) of
// the given `shape` according to indices.  This operator is the inverse of the
// `tf.gather_nd` operator which extracts values or slices from a given tensor.
//
// This operation is similar to tensor_scatter_add, except that the tensor is
// zero-initialized. Calling `tf.scatter_nd(indices, values, shape)` is identical
// to `tensor_scatter_add(tf.zeros(shape, values.dtype), indices, values)`.
//
// If `indices` contains duplicates, then their updates are accumulated (summed).
//
// **WARNING**: The order in which updates are applied is nondeterministic, so the
// output will be nondeterministic if `indices` contains duplicates -- because
// of some numerical approximation issues, numbers summed in different order
// may yield different results.
//
// `indices` is an integer tensor containing indices into a new tensor of shape
// `shape`.  The last dimension of `indices` can be at most the rank of `shape`:
//
//     indices.shape[-1] <= shape.rank
//
// The last dimension of `indices` corresponds to indices into elements
// (if `indices.shape[-1] = shape.rank`) or slices
// (if `indices.shape[-1] < shape.rank`) along dimension `indices.shape[-1]` of
// `shape`.  `updates` is a tensor with shape
//
//     indices.shape[:-1] + shape[indices.shape[-1]:]
//
// The simplest form of scatter is to insert individual elements in a tensor by
// index. For example, say we want to insert 4 scattered elements in a rank-1
// tensor with 8 elements.
//
// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
// <img style="width:100%" src="https://www.tensorflow.org/images/ScatterNd1.png" alt>
// </div>
//
// In Python, this scatter operation would look like this:
//
// ```python
//     indices = tf.constant([[4], [3], [1], [7]])
//     updates = tf.constant([9, 10, 11, 12])
//     shape = tf.constant([8])
//     scatter = tf.scatter_nd(indices, updates, shape)
//     print(scatter)
// ```
//
// The resulting tensor would look like this:
//
//     [0, 11, 0, 10, 9, 0, 0, 12]
//
// We can also insert entire slices of a higher rank tensor all at once. For
// example, we can insert two slices in the first dimension of a rank-3 tensor
// with two matrices of new values.
//
// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
// <img style="width:100%" src="https://www.tensorflow.org/images/ScatterNd2.png" alt>
// </div>
//
// In Python, this scatter operation would look like this:
//
// ```python
//     indices = tf.constant([[0], [2]])
//     updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6],
//                             [7, 7, 7, 7], [8, 8, 8, 8]],
//                            [[5, 5, 5, 5], [6, 6, 6, 6],
//                             [7, 7, 7, 7], [8, 8, 8, 8]]])
//     shape = tf.constant([4, 4, 4])
//     scatter = tf.scatter_nd(indices, updates, shape)
//     print(scatter)
// ```
//
// The resulting tensor would look like this:
//
//     [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
//      [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
//      [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
//      [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]]
//
// Note that on CPU, if an out of bound index is found, an error is returned.
// On GPU, if an out of bound index is found, the index is ignored.
//
// Arguments:
//	indices: Index tensor.
//	updates: Updates to scatter into output.
//	shape: 1-D. The shape of the resulting tensor.
//
// Returns A new tensor with the given shape and updates applied according
// to the indices.
func ScatterNd(scope *Scope, indices tf.Output, updates tf.Output, shape tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "ScatterNd",
		Input: []tf.Input{
			indices, updates, shape,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
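
// exampleScatterNd is a hand-written usage sketch, not generated code. It
// builds the rank-1 scatter from the Python example above using this package's
// Const helper; running the graph in a Session would yield
// [0, 11, 0, 10, 9, 0, 0, 12].
func exampleScatterNd() tf.Output {
	s := NewScope()
	indices := Const(s, [][]int32{{4}, {3}, {1}, {7}})
	updates := Const(s, []int32{9, 10, 11, 12})
	shape := Const(s, []int32{8})
	return ScatterNd(s, indices, updates, shape)
}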

// UniqueAttr is an optional argument to Unique.
type UniqueAttr func(optionalAttr)

// UniqueOutIdx sets the optional out_idx attribute to value.
// If not specified, defaults to DT_INT32
func UniqueOutIdx(value tf.DataType) UniqueAttr {
	return func(m optionalAttr) {
		m["out_idx"] = value
	}
}

// Finds unique elements in a 1-D tensor.
//
// This operation returns a tensor `y` containing all of the unique elements of `x`
// sorted in the same order that they occur in `x`; `x` does not need to be sorted.
// This operation also returns a tensor `idx` the same size as `x` that contains
// the index of each value of `x` in the unique output `y`. In other words:
//
// `y[idx[i]] = x[i] for i in [0, 1, ..., len(x) - 1]`
//
// Examples:
//
// ```
// # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]
// y, idx = unique(x)
// y ==> [1, 2, 4, 7, 8]
// idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]
// ```
//
// ```
// # tensor 'x' is [4, 5, 1, 2, 3, 3, 4, 5]
// y, idx = unique(x)
// y ==> [4, 5, 1, 2, 3]
// idx ==> [0, 1, 2, 3, 4, 4, 0, 1]
// ```
//
// Arguments:
//	x: 1-D.
//
// Returns:
//	y: 1-D.
//	idx: 1-D.
func Unique(scope *Scope, x tf.Output, optional ...UniqueAttr) (y tf.Output, idx tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "Unique",
		Input: []tf.Input{
			x,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1)
}
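
// exampleUnique is a hand-written usage sketch, not generated code. It mirrors
// the first example above and requests int64 indices via the optional out_idx
// attribute; y would be [1, 2, 4, 7, 8] and idx [0, 0, 1, 2, 2, 2, 3, 4, 4].
func exampleUnique() (y, idx tf.Output) {
	s := NewScope()
	x := Const(s, []int32{1, 1, 2, 4, 4, 4, 7, 8, 8})
	return Unique(s, x, UniqueOutIdx(tf.Int64))
}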

// Converts a `RaggedTensor` into a `SparseTensor` with the same values.
//
// input=ragged.from_nested_row_splits(rt_dense_values, rt_nested_splits)
// output=SparseTensor(indices=sparse_indices, values=sparse_values,
//                     dense_shape=sparse_dense_shape)
//
// Arguments:
//	rt_nested_splits: The `row_splits` for the `RaggedTensor`.
//	rt_dense_values: The `flat_values` for the `RaggedTensor`.
//
// Returns:
//	sparse_indices: The indices for the `SparseTensor`.
//	sparse_values: The values of the `SparseTensor`.
//	sparse_dense_shape: `sparse_dense_shape` is a tight bounding box of the input `RaggedTensor`.
func RaggedTensorToSparse(scope *Scope, rt_nested_splits []tf.Output, rt_dense_values tf.Output) (sparse_indices tf.Output, sparse_values tf.Output, sparse_dense_shape tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "RaggedTensorToSparse",
		Input: []tf.Input{
			tf.OutputList(rt_nested_splits), rt_dense_values,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}

// Produce a string tensor that encodes the state of a Reader.
//
// Not all Readers support being serialized, so this can produce an
// Unimplemented error.
//
// Arguments:
//	reader_handle: Handle to a Reader.
func ReaderSerializeStateV2(scope *Scope, reader_handle tf.Output) (state tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "ReaderSerializeStateV2",
		Input: []tf.Input{
			reader_handle,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Split a `SparseTensor` into `num_split` tensors along one dimension.
//
// If `shape[split_dim]` is not an integer multiple of `num_split`, slices
// `[0 : shape[split_dim] % num_split]` each get one extra dimension.
// For example, if `split_dim = 1` and `num_split = 2` and the input is
//
//     input_tensor = shape = [2, 7]
//     [    a   d e  ]
//     [b c          ]
//
// Graphically the output tensors are:
//
//     output_tensor[0] = shape = [2, 4]
//     [    a  ]
//     [b c    ]
//
//     output_tensor[1] = shape = [2, 3]
//     [ d e  ]
//     [      ]
//
// Arguments:
//	split_dim: 0-D.  The dimension along which to split.  Must be in the range
// `[0, rank(shape))`.
//	indices: 2-D tensor representing the indices of the sparse tensor.
//	values: 1-D tensor representing the values of the sparse tensor.
//	shape: 1-D tensor representing the shape of the sparse tensor.
//	num_split: The number of ways to split.
//
// Returns:
//	output_indices: A list of 1-D tensors representing the indices of the output
// sparse tensors.
//	output_values: A list of 1-D tensors representing the values of the output sparse
// tensors.
//	output_shape: A list of 1-D tensors representing the shape of the output sparse
// tensors.
func SparseSplit(scope *Scope, split_dim tf.Output, indices tf.Output, values tf.Output, shape tf.Output, num_split int64) (output_indices []tf.Output, output_values []tf.Output, output_shape []tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_split": num_split}
	opspec := tf.OpSpec{
		Type: "SparseSplit",
		Input: []tf.Input{
			split_dim, indices, values, shape,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	if scope.Err() != nil {
		return
	}
	var idx int
	var err error
	if output_indices, idx, err = makeOutputList(op, idx, "output_indices"); err != nil {
		scope.UpdateErr("SparseSplit", err)
		return
	}
	if output_values, idx, err = makeOutputList(op, idx, "output_values"); err != nil {
		scope.UpdateErr("SparseSplit", err)
		return
	}
	if output_shape, idx, err = makeOutputList(op, idx, "output_shape"); err != nil {
		scope.UpdateErr("SparseSplit", err)
		return
	}
	return output_indices, output_values, output_shape
}
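
// exampleSparseSplit is a hand-written usage sketch, not generated code. It
// encodes the [2, 7] example above as a SparseTensor and splits it two ways
// along dimension 1, producing the [2, 4] and [2, 3] pieces shown.
func exampleSparseSplit() (indices, values, shapes []tf.Output) {
	s := NewScope()
	splitDim := Const(s, int64(1))
	inIndices := Const(s, [][]int64{{0, 2}, {0, 5}, {0, 6}, {1, 0}, {1, 1}})
	inValues := Const(s, []string{"a", "d", "e", "b", "c"})
	inShape := Const(s, []int64{2, 7})
	return SparseSplit(s, splitDim, inIndices, inValues, inShape, 2)
}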

// Computes rectified linear 6: `min(max(features, 0), 6)`.
func Relu6(scope *Scope, features tf.Output) (activations tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Relu6",
		Input: []tf.Input{
			features,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// RaggedRangeAttr is an optional argument to RaggedRange.
type RaggedRangeAttr func(optionalAttr)

// RaggedRangeTsplits sets the optional Tsplits attribute to value.
// If not specified, defaults to DT_INT64
func RaggedRangeTsplits(value tf.DataType) RaggedRangeAttr {
	return func(m optionalAttr) {
		m["Tsplits"] = value
	}
}

// Returns a `RaggedTensor` containing the specified sequences of numbers.
//
// Returns a `RaggedTensor` `result` composed from `rt_dense_values` and
// `rt_nested_splits`, such that
// `result[i] = range(starts[i], limits[i], deltas[i])`.
//
// ```python
// (rt_nested_splits, rt_dense_values) = ragged_range(
//       starts=[2, 5, 8], limits=[3, 5, 12], deltas=1)
// result = tf.ragged.from_row_splits(rt_dense_values, rt_nested_splits)
// print(result)
// <tf.RaggedTensor [[2], [], [8, 9, 10, 11]] >
// ```
//
// The input tensors `starts`, `limits`, and `deltas` may be scalars or vectors.
// The vector inputs must all have the same size.  Scalar inputs are broadcast
// to match the size of the vector inputs.
//
// Arguments:
//	starts: The starts of each range.
//	limits: The limits of each range.
//	deltas: The deltas of each range.
//
// Returns:
//	rt_nested_splits: The `row_splits` for the returned `RaggedTensor`.
//	rt_dense_values: The `flat_values` for the returned `RaggedTensor`.
func RaggedRange(scope *Scope, starts tf.Output, limits tf.Output, deltas tf.Output, optional ...RaggedRangeAttr) (rt_nested_splits tf.Output, rt_dense_values tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "RaggedRange",
		Input: []tf.Input{
			starts, limits, deltas,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1)
}
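
// exampleRaggedRange is a hand-written usage sketch, not generated code. It
// rebuilds the Python example above: the three ranges [2, 3), [5, 5) and
// [8, 12) give rt_nested_splits [0, 1, 1, 5] and rt_dense_values
// [2, 8, 9, 10, 11].
func exampleRaggedRange() (splits, values tf.Output) {
	s := NewScope()
	starts := Const(s, []int64{2, 5, 8})
	limits := Const(s, []int64{3, 5, 12})
	deltas := Const(s, int64(1)) // scalar, broadcast against the vector inputs
	return RaggedRange(s, starts, limits, deltas)
}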

// RandomPoissonV2Attr is an optional argument to RandomPoissonV2.
type RandomPoissonV2Attr func(optionalAttr)

// RandomPoissonV2Seed sets the optional seed attribute to value.
//
// value: If either `seed` or `seed2` are set to be non-zero, the random number
// generator is seeded by the given seed.  Otherwise, it is seeded by a
// random seed.
// If not specified, defaults to 0
func RandomPoissonV2Seed(value int64) RandomPoissonV2Attr {
	return func(m optionalAttr) {
		m["seed"] = value
	}
}

// RandomPoissonV2Seed2 sets the optional seed2 attribute to value.
//
// value: A second seed to avoid seed collision.
// If not specified, defaults to 0
func RandomPoissonV2Seed2(value int64) RandomPoissonV2Attr {
	return func(m optionalAttr) {
		m["seed2"] = value
	}
}

// RandomPoissonV2Dtype sets the optional dtype attribute to value.
// If not specified, defaults to DT_INT64
func RandomPoissonV2Dtype(value tf.DataType) RandomPoissonV2Attr {
	return func(m optionalAttr) {
		m["dtype"] = value
	}
}
// Outputs random values from the Poisson distribution(s) described by rate.
//
// This op uses two algorithms, depending on rate. If rate >= 10, then
// the algorithm by Hörmann is used to acquire samples via
// transformation-rejection.
// See http://www.sciencedirect.com/science/article/pii/0167668793909974.
//
// Otherwise, Knuth's algorithm is used to acquire samples via multiplying uniform
// random variables.
// See Donald E. Knuth (1969). Seminumerical Algorithms. The Art of Computer
// Programming, Volume 2. Addison Wesley.
//
// Arguments:
//	shape: 1-D integer tensor. Shape of independent samples to draw from each
// distribution described by the shape parameters given in rate.
//	rate: A tensor in which each scalar is a "rate" parameter describing the
// associated Poisson distribution.
//
// Returns A tensor with shape `shape + shape(rate)`. Each slice
// `[:, ..., :, i0, i1, ...iN]` contains the samples drawn for
// `rate[i0, i1, ...iN]`.
func RandomPoissonV2(scope *Scope, shape tf.Output, rate tf.Output, optional ...RandomPoissonV2Attr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "RandomPoissonV2",
		Input: []tf.Input{
			shape, rate,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
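
// exampleRandomPoissonV2 is a hand-written usage sketch, not generated code.
// It draws ten samples from each of two Poisson rates; the output shape is
// shape + shape(rate) = [10, 2], returned as int32 via the dtype attribute.
func exampleRandomPoissonV2() tf.Output {
	s := NewScope()
	shape := Const(s, []int32{10})
	rate := Const(s, []float32{1.5, 4.0})
	return RandomPoissonV2(s, shape, rate, RandomPoissonV2Dtype(tf.Int32))
}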

// LoadTPUEmbeddingCenteredRMSPropParametersAttr is an optional argument to LoadTPUEmbeddingCenteredRMSPropParameters.
type LoadTPUEmbeddingCenteredRMSPropParametersAttr func(optionalAttr)

// LoadTPUEmbeddingCenteredRMSPropParametersTableId sets the optional table_id attribute to value.
// If not specified, defaults to -1
func LoadTPUEmbeddingCenteredRMSPropParametersTableId(value int64) LoadTPUEmbeddingCenteredRMSPropParametersAttr {
	return func(m optionalAttr) {
		m["table_id"] = value
	}
}

// LoadTPUEmbeddingCenteredRMSPropParametersTableName sets the optional table_name attribute to value.
// If not specified, defaults to ""
func LoadTPUEmbeddingCenteredRMSPropParametersTableName(value string) LoadTPUEmbeddingCenteredRMSPropParametersAttr {
	return func(m optionalAttr) {
		m["table_name"] = value
	}
}

// LoadTPUEmbeddingCenteredRMSPropParametersConfig sets the optional config attribute to value.
// If not specified, defaults to ""
func LoadTPUEmbeddingCenteredRMSPropParametersConfig(value string) LoadTPUEmbeddingCenteredRMSPropParametersAttr {
	return func(m optionalAttr) {
		m["config"] = value
	}
}

// Load centered RMSProp embedding parameters.
//
// An op that loads optimization parameters into HBM for embedding. Must be
// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
// embedding table configuration. For example, this op is used to install
// parameters that are loaded from a checkpoint before a training loop is
// executed.
//
// Arguments:
//	parameters: Value of parameters used in the centered RMSProp optimization algorithm.
//	ms: Value of ms used in the centered RMSProp optimization algorithm.
//	mom: Value of mom used in the centered RMSProp optimization algorithm.
//	mg: Value of mg used in the centered RMSProp optimization algorithm.
//
// Returns the created operation.
func LoadTPUEmbeddingCenteredRMSPropParameters(scope *Scope, parameters tf.Output, ms tf.Output, mom tf.Output, mg tf.Output, num_shards int64, shard_id int64, optional ...LoadTPUEmbeddingCenteredRMSPropParametersAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "LoadTPUEmbeddingCenteredRMSPropParameters",
		Input: []tf.Input{
			parameters, ms, mom, mg,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}

// RandomPoissonAttr is an optional argument to RandomPoisson.
type RandomPoissonAttr func(optionalAttr)

// RandomPoissonSeed sets the optional seed attribute to value.
// If not specified, defaults to 0
func RandomPoissonSeed(value int64) RandomPoissonAttr {
	return func(m optionalAttr) {
		m["seed"] = value
	}
}

// RandomPoissonSeed2 sets the optional seed2 attribute to value.
// If not specified, defaults to 0
func RandomPoissonSeed2(value int64) RandomPoissonAttr {
	return func(m optionalAttr) {
		m["seed2"] = value
	}
}

// Use RandomPoissonV2 instead.
//
// DEPRECATED at GraphDef version 25: Replaced by RandomPoissonV2
func RandomPoisson(scope *Scope, shape tf.Output, rate tf.Output, optional ...RandomPoissonAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "RandomPoisson",
		Input: []tf.Input{
			shape, rate,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Computes the derivative of a Gamma random sample w.r.t. `alpha`.
func RandomGammaGrad(scope *Scope, alpha tf.Output, sample tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "RandomGammaGrad",
		Input: []tf.Input{
			alpha, sample,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Get the value of the tensor specified by its handle.
//
// Arguments:
//	handle: The handle for a tensor stored in the session state.
//	dtype: The type of the output value.
//
// Returns The tensor for the given handle.
func GetSessionTensor(scope *Scope, handle tf.Output, dtype tf.DataType) (value tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"dtype": dtype}
	opspec := tf.OpSpec{
		Type: "GetSessionTensor",
		Input: []tf.Input{
			handle,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// MinAttr is an optional argument to Min.
type MinAttr func(optionalAttr)

// MinKeepDims sets the optional keep_dims attribute to value.
//
// value: If true, retain reduced dimensions with length 1.
// If not specified, defaults to false
func MinKeepDims(value bool) MinAttr {
	return func(m optionalAttr) {
		m["keep_dims"] = value
	}
}

// Computes the minimum of elements across dimensions of a tensor.
//
// Reduces `input` along the dimensions given in `axis`. Unless
// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
// `axis`. If `keep_dims` is true, the reduced dimensions are
// retained with length 1.
//
// Arguments:
//	input: The tensor to reduce.
//	axis: The dimensions to reduce. Must be in the range
// `[-rank(input), rank(input))`.
//
// Returns The reduced tensor.
func Min(scope *Scope, input tf.Output, axis tf.Output, optional ...MinAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "Min",
		Input: []tf.Input{
			input, axis,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
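
// exampleMin is a hand-written usage sketch, not generated code. It reduces a
// [2, 2] tensor along axis 1 and retains the reduced dimension with length 1,
// so the result has shape [2, 1] rather than [2].
func exampleMin() tf.Output {
	s := NewScope()
	input := Const(s, [][]float32{{1, 2}, {3, 4}})
	axis := Const(s, []int32{1})
	return Min(s, input, axis, MinKeepDims(true))
}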

// Outputs a tensor containing the reduction across all input tensors.
//
// Outputs a tensor containing the reduction across all input tensors passed to ops
// within the same `shared_name`.
//
// The graph should be constructed so that if one op runs with shared_name value `c`,
// then `num_devices` ops will run with shared_name value `c`.  Failure to do so
// will cause the graph execution to fail to complete.
//
// input: the input to the reduction.
// data: the value of the reduction across all `num_devices` devices.
// reduction: the reduction operation to perform.
// num_devices: The number of devices participating in this reduction.
// shared_name: Identifier that is shared between ops of the same reduction.
func NcclAllReduce(scope *Scope, input tf.Output, reduction string, num_devices int64, shared_name string) (data tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"reduction": reduction, "num_devices": num_devices, "shared_name": shared_name}
	opspec := tf.OpSpec{
		Type: "NcclAllReduce",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// RandomShuffleAttr is an optional argument to RandomShuffle.
type RandomShuffleAttr func(optionalAttr)

// RandomShuffleSeed sets the optional seed attribute to value.
//
// value: If either `seed` or `seed2` are set to be non-zero, the random number
// generator is seeded by the given seed.  Otherwise, it is seeded by a
// random seed.
// If not specified, defaults to 0
func RandomShuffleSeed(value int64) RandomShuffleAttr {
	return func(m optionalAttr) {
		m["seed"] = value
	}
}

// RandomShuffleSeed2 sets the optional seed2 attribute to value.
//
// value: A second seed to avoid seed collision.
// If not specified, defaults to 0
func RandomShuffleSeed2(value int64) RandomShuffleAttr {
	return func(m optionalAttr) {
		m["seed2"] = value
	}
}

// Randomly shuffles a tensor along its first dimension.
//
//   The tensor is shuffled along dimension 0, such that each `value[j]` is mapped
//   to one and only one `output[i]`. For example, a mapping that might occur for a
//   3x2 tensor is:
//
// ```
// [[1, 2],       [[5, 6],
//  [3, 4],  ==>   [1, 2],
//  [5, 6]]        [3, 4]]
// ```
//
// Arguments:
//	value: The tensor to be shuffled.
//
// Returns A tensor of same shape and type as `value`, shuffled along its first
// dimension.
func RandomShuffle(scope *Scope, value tf.Output, optional ...RandomShuffleAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "RandomShuffle",
		Input: []tf.Input{
			value,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
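
// exampleRandomShuffle is a hand-written usage sketch, not generated code. It
// shuffles the three rows of a 3x2 tensor; each row keeps its contents, only
// the order along dimension 0 changes. The fixed seed makes runs reproducible.
func exampleRandomShuffle() tf.Output {
	s := NewScope()
	value := Const(s, [][]int32{{1, 2}, {3, 4}, {5, 6}})
	return RandomShuffle(s, value, RandomShuffleSeed(3))
}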

// Creates a dataset that takes a Bernoulli sample of the contents of another dataset.
//
// There is no transformation in the `tf.data` Python API for creating this dataset.
// Instead, it is created as a result of the `filter_with_random_uniform_fusion`
// static optimization. Whether this optimization is performed is determined by the
// `experimental_optimization.filter_with_random_uniform_fusion` option of
// `tf.data.Options`.
//
// Arguments:
//	input_dataset: A variant tensor representing the input dataset.
//	rate: A scalar representing the sample rate. Each element of `input_dataset` is
// retained with this probability, independent of all other elements.
//	seed: A scalar representing seed of random number generator.
//	seed2: A scalar representing seed2 of random number generator.
func SamplingDataset(scope *Scope, input_dataset tf.Output, rate tf.Output, seed tf.Output, seed2 tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
	opspec := tf.OpSpec{
		Type: "SamplingDataset",
		Input: []tf.Input{
			input_dataset, rate, seed, seed2,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Reads and outputs the entire contents of the input filename.
func ReadFile(scope *Scope, filename tf.Output) (contents tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "ReadFile",
		Input: []tf.Input{
			filename,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Computes requantization range per channel.
//
// Arguments:
//	input: The original input tensor.
//	input_min: The minimum value of the input tensor.
//	input_max: The maximum value of the input tensor.
//	clip_value_max: The maximum value of the output that needs to be clipped.
// Example: set this to 6 for Relu6.
//
// Returns:
//	output_min: The minimum value of the final output tensor.
//	output_max: The maximum value of the final output tensor.
func RequantizationRangePerChannel(scope *Scope, input tf.Output, input_min tf.Output, input_max tf.Output, clip_value_max float32) (output_min tf.Output, output_max tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"clip_value_max": clip_value_max}
	opspec := tf.OpSpec{
		Type: "RequantizationRangePerChannel",
		Input: []tf.Input{
			input, input_min, input_max,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1)
}

// TruncatedNormalAttr is an optional argument to TruncatedNormal.
type TruncatedNormalAttr func(optionalAttr)

// TruncatedNormalSeed sets the optional seed attribute to value.
//
// value: If either `seed` or `seed2` are set to be non-zero, the random number
// generator is seeded by the given seed.  Otherwise, it is seeded by a
// random seed.
// If not specified, defaults to 0
func TruncatedNormalSeed(value int64) TruncatedNormalAttr {
	return func(m optionalAttr) {
		m["seed"] = value
	}
}

// TruncatedNormalSeed2 sets the optional seed2 attribute to value.
//
// value: A second seed to avoid seed collision.
// If not specified, defaults to 0
func TruncatedNormalSeed2(value int64) TruncatedNormalAttr {
	return func(m optionalAttr) {
		m["seed2"] = value
	}
}

// Outputs random values from a truncated normal distribution.
//
// The generated values follow a normal distribution with mean 0 and standard
// deviation 1, except that values whose magnitude is more than 2 standard
// deviations from the mean are dropped and re-picked.
//
// Arguments:
//	shape: The shape of the output tensor.
//	dtype: The type of the output.
//
// Returns A tensor of the specified shape filled with random truncated normal
// values.
func TruncatedNormal(scope *Scope, shape tf.Output, dtype tf.DataType, optional ...TruncatedNormalAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"dtype": dtype}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "TruncatedNormal",
		Input: []tf.Input{
			shape,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
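
// exampleTruncatedNormal is a hand-written usage sketch, not generated code.
// It requests a [2, 3] float tensor; setting both seeds makes the truncated
// normal draw reproducible across runs.
func exampleTruncatedNormal() tf.Output {
	s := NewScope()
	shape := Const(s, []int32{2, 3})
	return TruncatedNormal(s, shape, tf.Float,
		TruncatedNormalSeed(42), TruncatedNormalSeed2(7))
}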

// Computes the complementary error function of `x` element-wise.
func Erfc(scope *Scope, x tf.Output) (y tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Erfc",
		Input: []tf.Input{
			x,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Returns max(x, y) element-wise.
//
// *NOTE*: `RiscMax` does not support broadcasting.
//
// Given two input tensors, the `tf.risc_max` operation computes the maximum for every element in the tensor.
func RiscMax(scope *Scope, x tf.Output, y tf.Output) (max tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "RiscMax",
		Input: []tf.Input{
			x, y,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// RandomUniformIntAttr is an optional argument to RandomUniformInt.
type RandomUniformIntAttr func(optionalAttr)

// RandomUniformIntSeed sets the optional seed attribute to value.
//
// value: If either `seed` or `seed2` are set to be non-zero, the random number
// generator is seeded by the given seed.  Otherwise, it is seeded by a
// random seed.
// If not specified, defaults to 0
func RandomUniformIntSeed(value int64) RandomUniformIntAttr {
	return func(m optionalAttr) {
		m["seed"] = value
	}
}

// RandomUniformIntSeed2 sets the optional seed2 attribute to value.
//
// value: A second seed to avoid seed collision.
// If not specified, defaults to 0
func RandomUniformIntSeed2(value int64) RandomUniformIntAttr {
	return func(m optionalAttr) {
		m["seed2"] = value
	}
}

// Outputs random integers from a uniform distribution.
//
// The generated values are uniform integers in the range `[minval, maxval)`.
// The lower bound `minval` is included in the range, while the upper bound
// `maxval` is excluded.
//
// The random integers are slightly biased unless `maxval - minval` is an exact
// power of two.  The bias is small for values of `maxval - minval` significantly
// smaller than the range of the output (either `2^32` or `2^64`).
//
// Arguments:
//	shape: The shape of the output tensor.
//	minval: 0-D.  Inclusive lower bound on the generated integers.
//	maxval: 0-D.  Exclusive upper bound on the generated integers.
//
// Returns A tensor of the specified shape filled with uniform random integers.
func RandomUniformInt(scope *Scope, shape tf.Output, minval tf.Output, maxval tf.Output, optional ...RandomUniformIntAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "RandomUniformInt",
		Input: []tf.Input{
			shape, minval, maxval,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
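
// exampleRandomUniformInt is a hand-written usage sketch, not generated code.
// It draws four integers uniformly from [0, 10); the bound tensors fix the
// output type to int64, and the bias noted above is negligible since the
// range is tiny compared to 2^64.
func exampleRandomUniformInt() tf.Output {
	s := NewScope()
	shape := Const(s, []int32{4})
	minval := Const(s, int64(0))
	maxval := Const(s, int64(10))
	return RandomUniformInt(s, shape, minval, maxval, RandomUniformIntSeed(1))
}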

// RandomUniformAttr is an optional argument to RandomUniform.
type RandomUniformAttr func(optionalAttr)

// RandomUniformSeed sets the optional seed attribute to value.
//
// value: If either `seed` or `seed2` are set to be non-zero, the random number
// generator is seeded by the given seed.  Otherwise, it is seeded by a
// random seed.
// If not specified, defaults to 0
func RandomUniformSeed(value int64) RandomUniformAttr {
	return func(m optionalAttr) {
		m["seed"] = value
	}
}

// RandomUniformSeed2 sets the optional seed2 attribute to value.
//
// value: A second seed to avoid seed collision.
// If not specified, defaults to 0
func RandomUniformSeed2(value int64) RandomUniformAttr {
	return func(m optionalAttr) {
		m["seed2"] = value
	}
}

// Outputs random values from a uniform distribution.
//
// The generated values follow a uniform distribution in the range `[0, 1)`. The
// lower bound 0 is included in the range, while the upper bound 1 is excluded.
//
// Arguments:
//	shape: The shape of the output tensor.
//	dtype: The type of the output.
//
// Returns A tensor of the specified shape filled with uniform random values.
func RandomUniform(scope *Scope, shape tf.Output, dtype tf.DataType, optional ...RandomUniformAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"dtype": dtype}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "RandomUniform",
		Input: []tf.Input{
			shape,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Produces the average pool of the input tensor for quantized types.
//
// Arguments:
//	input: 4-D with shape `[batch, height, width, channels]`.
//	min_input: The float value that the lowest quantized input value represents.
//	max_input: The float value that the highest quantized input value represents.
//	ksize: The size of the window for each dimension of the input tensor.
// The length must be 4 to match the number of dimensions of the input.
//	strides: The stride of the sliding window for each dimension of the input
// tensor.  The length must be 4 to match the number of dimensions of the input.
//	padding: The type of padding algorithm to use.
//
// Returns:
//	output
//	min_output: The float value that the lowest quantized output value represents.
//	max_output: The float value that the highest quantized output value represents.
func QuantizedAvgPool(scope *Scope, input tf.Output, min_input tf.Output, max_input tf.Output, ksize []int64, strides []int64, padding string) (output tf.Output, min_output tf.Output, max_output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
	opspec := tf.OpSpec{
		Type: "QuantizedAvgPool",
		Input: []tf.Input{
			input, min_input, max_input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}

// Adds v into specified rows of x.
//
//     Computes y = x; y[i, :] += v; return y.
//
// Arguments:
//	x: A `Tensor` of type T.
//	i: A vector. Indices into the left-most dimension of `x`.
//	v: A `Tensor` of type T. Same dimension sizes as x except the first dimension, which must be the same as i's size.
//
// Returns A `Tensor` of type T. An alias of `x`. The content of `y` is undefined if there are duplicates in `i`.
func InplaceAdd(scope *Scope, x tf.Output, i tf.Output, v tf.Output) (y tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "InplaceAdd",
		Input: []tf.Input{
			x, i, v,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// PrintAttr is an optional argument to Print.
type PrintAttr func(optionalAttr)

// PrintMessage sets the optional message attribute to value.
//
// value: A string, prefix of the printed message.
// If not specified, defaults to ""
func PrintMessage(value string) PrintAttr {
	return func(m optionalAttr) {
		m["message"] = value
	}
}

// PrintFirstN sets the optional first_n attribute to value.
//
// value: Only log `first_n` number of times. -1 disables logging.
// If not specified, defaults to -1
func PrintFirstN(value int64) PrintAttr {
	return func(m optionalAttr) {
		m["first_n"] = value
	}
}

// PrintSummarize sets the optional summarize attribute to value.
//
// value: Only print this many entries of each tensor.
// If not specified, defaults to 3
func PrintSummarize(value int64) PrintAttr {
	return func(m optionalAttr) {
		m["summarize"] = value
	}
}

// Prints a list of tensors.
//
// Passes `input` through to `output` and prints `data` when evaluating.
//
// Arguments:
//	input: The tensor passed to `output`.
//	data: A list of tensors to print out when op is evaluated.
//
// Returns The unmodified `input` tensor.
func Print(scope *Scope, input tf.Output, data []tf.Output, optional ...PrintAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "Print",
		Input: []tf.Input{
			input, tf.OutputList(data),
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
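
// examplePrint is a hand-written usage sketch, not generated code. It passes
// x through unchanged while logging it, showing how the optional attributes
// compose: a message prefix and a cap of ten log lines.
func examplePrint() tf.Output {
	s := NewScope()
	x := Const(s, []float32{1, 2, 3})
	return Print(s, x, []tf.Output{x}, PrintMessage("x is: "), PrintFirstN(10))
}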

// Computes the Approximate Minimum Degree (AMD) ordering of `input`.
//
// Computes the Approximate Minimum Degree (AMD) ordering for a sparse matrix.
//
// The returned permutation may be used to permute the rows and columns of the
// given sparse matrix. This typically results in the permuted sparse matrix's
// sparse Cholesky (or other decompositions) having fewer zero fill-ins compared
// to the decomposition of the original matrix.
//
// The input sparse matrix may have rank 2 or rank 3. The output Tensor,
// representing the permutation, would then have rank 1 or 2 respectively, with
// the same batch shape as the input.
//
// Each component of the input sparse matrix must represent a square symmetric
// matrix; only the lower triangular part of the matrix is read. The values of the
// sparse matrix do not affect the returned permutation, only the sparsity
// pattern of the sparse matrix is used. Hence, a single AMD ordering may be
// reused for the Cholesky decompositions of sparse matrices with the same sparsity
// pattern but with possibly different values.
//
// Each batch component of the output permutation represents a permutation of `N`
// elements, where the input sparse matrix components each have `N` rows. That is,
// the component contains each of the integers `{0, .. N-1}` exactly once. The
// `i`th element represents the row index that the `i`th row maps to.
//
// Usage example:
//
// ```python
//     from tensorflow.python.ops.linalg.sparse import sparse_csr_matrix_ops
//
//     a_indices = np.array([[0, 0], [1, 1], [2, 1], [2, 2], [3, 3]])
//     a_values = np.array([1.0, 2.0, 1.0, 3.0, 4.0], np.float32)
//     a_dense_shape = [4, 4]
//
//     with tf.Session() as sess:
//       # Define (COO format) SparseTensor over Numpy array.
//       a_st = tf.sparse.SparseTensor(a_indices, a_values, a_dense_shape)
//
//       # Convert SparseTensors to CSR SparseMatrix.
//       a_sm = sparse_csr_matrix_ops.sparse_tensor_to_csr_sparse_matrix(
//           a_st.indices, a_st.values, a_st.dense_shape)
//
//       # Obtain the AMD Ordering for the CSR SparseMatrix.
//       ordering_amd = sparse_csr_matrix_ops.sparse_matrix_ordering_amd(a_sm)
//
//       ordering_amd_value = sess.run(ordering_amd)
// ```
//
// `ordering_amd_value` stores the AMD ordering: `[1 2 3 0]`.
//
// Arguments:
//	input: A `CSRSparseMatrix`.
//
// Returns The Approximate Minimum Degree (AMD) ordering of `input`.
29283func SparseMatrixOrderingAMD(scope *Scope, input tf.Output) (output tf.Output) {
29284	if scope.Err() != nil {
29285		return
29286	}
29287	opspec := tf.OpSpec{
29288		Type: "SparseMatrixOrderingAMD",
29289		Input: []tf.Input{
29290			input,
29291		},
29292	}
29293	op := scope.AddOperation(opspec)
29294	return op.Output(0)
29295}

// FakeQuantWithMinMaxVarsAttr is an optional argument to FakeQuantWithMinMaxVars.
type FakeQuantWithMinMaxVarsAttr func(optionalAttr)

// FakeQuantWithMinMaxVarsNumBits sets the optional num_bits attribute to value.
// If not specified, defaults to 8
func FakeQuantWithMinMaxVarsNumBits(value int64) FakeQuantWithMinMaxVarsAttr {
	return func(m optionalAttr) {
		m["num_bits"] = value
	}
}

// FakeQuantWithMinMaxVarsNarrowRange sets the optional narrow_range attribute to value.
// If not specified, defaults to false
func FakeQuantWithMinMaxVarsNarrowRange(value bool) FakeQuantWithMinMaxVarsAttr {
	return func(m optionalAttr) {
		m["narrow_range"] = value
	}
}

// Fake-quantize the 'inputs' tensor of type float via global float scalars
//
// Fake-quantize the `inputs` tensor of type float via global float scalars
// `min` and `max` to `outputs` tensor of same shape as `inputs`.
//
// Attributes
//
// *   `[min; max]` define the clamping range for the `inputs` data.
// *   `inputs` values are quantized into the quantization range (
// `[0; 2^num_bits - 1]` when `narrow_range` is false and `[1; 2^num_bits - 1]`
// when it is true) and then de-quantized and output as floats in `[min; max]`
// interval.
// *   `num_bits` is the bitwidth of the quantization; between 2 and 16, inclusive.
//
// Before quantization, `min` and `max` values are adjusted with the following
// logic.
// It is suggested to have `min <= 0 <= max`. If `0` is not in the range of values,
// the behavior can be unexpected:
//
// *   If `0 < min < max`: `min_adj = 0` and `max_adj = max - min`.
// *   If `min < max < 0`: `min_adj = min - max` and `max_adj = 0`.
// *   If `min <= 0 <= max`: `scale = (max - min) / (2^num_bits - 1)`,
// `min_adj = scale * round(min / scale)` and `max_adj = max + min_adj - min`.
//
// This operation has a gradient and thus allows for training `min` and `max`
// values.
func FakeQuantWithMinMaxVars(scope *Scope, inputs tf.Output, min tf.Output, max tf.Output, optional ...FakeQuantWithMinMaxVarsAttr) (outputs tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "FakeQuantWithMinMaxVars",
		Input: []tf.Input{
			inputs, min, max,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
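
// A minimal sketch (editor's addition, not generated code) of the min/max
// adjustment described above, mirroring the three documented cases. It uses
// the standard "math" package, which would need to be imported at the top of
// the file; illustration only.
func nudgedMinMax(min, max float64, numBits int64) (minAdj, maxAdj float64) {
	switch {
	case 0 < min: // 0 < min < max: shift the range so it starts at 0
		return 0, max - min
	case max < 0: // min < max < 0: shift the range so it ends at 0
		return min - max, 0
	default: // min <= 0 <= max: snap min to a multiple of the scale
		scale := (max - min) / (math.Pow(2, float64(numBits)) - 1)
		minAdj = scale * math.Round(min/scale)
		return minAdj, max + minAdj - min
	}
}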

// Enqueue multiple Tensor values on the computation outfeed.
//
// Arguments:
//	inputs: A list of tensors that will be inserted into the outfeed queue as an
// XLA tuple.
//
// Returns the created operation.
func OutfeedEnqueueTuple(scope *Scope, inputs []tf.Output) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "OutfeedEnqueueTuple",
		Input: []tf.Input{
			tf.OutputList(inputs),
		},
	}
	return scope.AddOperation(opspec)
}

// Wraps the XLA Gather operator documented at
//
//   https://www.tensorflow.org/xla/operation_semantics#gather
//
// Arguments:
//	operand: The array we're gathering from.
//	start_indices: Array containing the starting indices of the slices we gather.
//	slice_sizes: slice_sizes[i] is the bounds for the slice on dimension i.
//	dimension_numbers: A serialized xla::GatherDimensionNumbers proto.
//	indices_are_sorted: Boolean indicating if the indices are sorted.
func XlaGather(scope *Scope, operand tf.Output, start_indices tf.Output, slice_sizes tf.Output, dimension_numbers string, indices_are_sorted bool) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"dimension_numbers": dimension_numbers, "indices_are_sorted": indices_are_sorted}
	opspec := tf.OpSpec{
		Type: "XlaGather",
		Input: []tf.Input{
			operand, start_indices, slice_sizes,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// QuantizedConv2DAttr is an optional argument to QuantizedConv2D.
type QuantizedConv2DAttr func(optionalAttr)

// QuantizedConv2DOutType sets the optional out_type attribute to value.
// If not specified, defaults to DT_QINT32
func QuantizedConv2DOutType(value tf.DataType) QuantizedConv2DAttr {
	return func(m optionalAttr) {
		m["out_type"] = value
	}
}

// QuantizedConv2DDilations sets the optional dilations attribute to value.
//
// value: 1-D tensor of length 4.  The dilation factor for each dimension of
// `input`. If set to k > 1, there will be k-1 skipped cells between each
// filter element on that dimension. The dimension order is determined by the
// value of `data_format`, see above for details. Dilations in the batch and
// depth dimensions must be 1.
// If not specified, defaults to <i:1 i:1 i:1 i:1 >
func QuantizedConv2DDilations(value []int64) QuantizedConv2DAttr {
	return func(m optionalAttr) {
		m["dilations"] = value
	}
}

// Computes a 2D convolution given quantized 4D input and filter tensors.
//
// The inputs are quantized tensors where the lowest value represents the real
// number of the associated minimum, and the highest represents the maximum.
// This means that you can only interpret the quantized output in the same way, by
// taking the returned minimum and maximum values into account.
//
// Arguments:
//
//	filter: filter's input_depth dimension must match input's depth dimensions.
//	min_input: The float value that the lowest quantized input value represents.
//	max_input: The float value that the highest quantized input value represents.
//	min_filter: The float value that the lowest quantized filter value represents.
//	max_filter: The float value that the highest quantized filter value represents.
//	strides: The stride of the sliding window for each dimension of the input
// tensor.
//	padding: The type of padding algorithm to use.
//
// Returns:
//	output
//	min_output: The float value that the lowest quantized output value represents.
//	max_output: The float value that the highest quantized output value represents.
func QuantizedConv2D(scope *Scope, input tf.Output, filter tf.Output, min_input tf.Output, max_input tf.Output, min_filter tf.Output, max_filter tf.Output, strides []int64, padding string, optional ...QuantizedConv2DAttr) (output tf.Output, min_output tf.Output, max_output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"strides": strides, "padding": padding}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "QuantizedConv2D",
		Input: []tf.Input{
			input, filter, min_input, max_input, min_filter, max_filter,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}
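
// A small sketch (editor's addition) of how to interpret a quantized output
// given the returned min_output/max_output: quantized values map linearly onto
// the real interval. qmin and qmax are the lowest and highest representable
// values of the quantized type (e.g. int32 bounds for DT_QINT32); this is an
// illustration of the documented interpretation, not library code.
func dequantize(q, qmin, qmax int64, minVal, maxVal float32) float32 {
	// One quantized step corresponds to this much of the real range.
	scale := (maxVal - minVal) / float32(qmax-qmin)
	return minVal + float32(q-qmin)*scale
}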

// Converts the quantized `input` tensor into a lower-precision `output`.
//
// Converts the quantized `input` tensor into a lower-precision `output`, using the
// output range specified with `requested_output_min` and `requested_output_max`.
//
// `[input_min, input_max]` are scalar floats that specify the range for the float
// interpretation of the `input` data. For example, if `input_min` is -1.0f and
// `input_max` is 1.0f, and we are dealing with `quint16` quantized data, then a 0
// value in the 16-bit data should be interpreted as -1.0f, and a 65535 means 1.0f.
//
// Arguments:
//
//	input_min: The float value that the minimum quantized input value represents.
//	input_max: The float value that the maximum quantized input value represents.
//	requested_output_min: The float value that the minimum quantized output value represents.
//	requested_output_max: The float value that the maximum quantized output value represents.
//	out_type: The type of the output. Should be a lower bit depth than Tinput.
//
// Returns:
//	output
//	output_min: The requested_output_min value is copied into this output.
//	output_max: The requested_output_max value is copied into this output.
func Requantize(scope *Scope, input tf.Output, input_min tf.Output, input_max tf.Output, requested_output_min tf.Output, requested_output_max tf.Output, out_type tf.DataType) (output tf.Output, output_min tf.Output, output_max tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"out_type": out_type}
	opspec := tf.OpSpec{
		Type: "Requantize",
		Input: []tf.Input{
			input, input_min, input_max, requested_output_min, requested_output_max,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}

// Conv2DBackpropInputAttr is an optional argument to Conv2DBackpropInput.
type Conv2DBackpropInputAttr func(optionalAttr)

// Conv2DBackpropInputUseCudnnOnGpu sets the optional use_cudnn_on_gpu attribute to value.
// If not specified, defaults to true
func Conv2DBackpropInputUseCudnnOnGpu(value bool) Conv2DBackpropInputAttr {
	return func(m optionalAttr) {
		m["use_cudnn_on_gpu"] = value
	}
}

// Conv2DBackpropInputExplicitPaddings sets the optional explicit_paddings attribute to value.
//
// value: If `padding` is `"EXPLICIT"`, the list of explicit padding amounts. For the ith
// dimension, the amount of padding inserted before and after the dimension is
// `explicit_paddings[2 * i]` and `explicit_paddings[2 * i + 1]`, respectively. If
// `padding` is not `"EXPLICIT"`, `explicit_paddings` must be empty.
// If not specified, defaults to <>
func Conv2DBackpropInputExplicitPaddings(value []int64) Conv2DBackpropInputAttr {
	return func(m optionalAttr) {
		m["explicit_paddings"] = value
	}
}

// Conv2DBackpropInputDataFormat sets the optional data_format attribute to value.
//
// value: Specify the data format of the input and output data. With the
// default format "NHWC", the data is stored in the order of:
//     [batch, in_height, in_width, in_channels].
// Alternatively, the format could be "NCHW", the data storage order of:
//     [batch, in_channels, in_height, in_width].
// If not specified, defaults to "NHWC"
func Conv2DBackpropInputDataFormat(value string) Conv2DBackpropInputAttr {
	return func(m optionalAttr) {
		m["data_format"] = value
	}
}

// Conv2DBackpropInputDilations sets the optional dilations attribute to value.
//
// value: 1-D tensor of length 4.  The dilation factor for each dimension of
// `input`. If set to k > 1, there will be k-1 skipped cells between each filter
// element on that dimension. The dimension order is determined by the value of
// `data_format`, see above for details. Dilations in the batch and depth
// dimensions must be 1.
// If not specified, defaults to <i:1 i:1 i:1 i:1 >
func Conv2DBackpropInputDilations(value []int64) Conv2DBackpropInputAttr {
	return func(m optionalAttr) {
		m["dilations"] = value
	}
}

// Computes the gradients of convolution with respect to the input.
//
// Arguments:
//	input_sizes: An integer vector representing the shape of `input`,
// where `input` is a 4-D `[batch, height, width, channels]` tensor.
//	filter: 4-D with shape
// `[filter_height, filter_width, in_channels, out_channels]`.
//	out_backprop: 4-D with shape `[batch, out_height, out_width, out_channels]`.
// Gradients w.r.t. the output of the convolution.
//	strides: The stride of the sliding window for each dimension of the input
// of the convolution. Must be in the same order as the dimension specified with
// format.
//	padding: The type of padding algorithm to use.
//
// Returns 4-D with shape `[batch, in_height, in_width, in_channels]`.  Gradient
// w.r.t. the input of the convolution.
func Conv2DBackpropInput(scope *Scope, input_sizes tf.Output, filter tf.Output, out_backprop tf.Output, strides []int64, padding string, optional ...Conv2DBackpropInputAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"strides": strides, "padding": padding}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "Conv2DBackpropInput",
		Input: []tf.Input{
			input_sizes, filter, out_backprop,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
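
// Usage sketch (editor's addition): wiring Conv2DBackpropInput for an NHWC
// input of shape [1, 28, 28, 3]. The concrete shape and the Const wrapper
// (defined elsewhere in this package) are assumptions for illustration only.
func exampleConv2DBackpropInput(s *Scope, filter, outBackprop tf.Output) tf.Output {
	// input_sizes is an int32 vector holding the shape of the original input.
	inputSizes := Const(s, []int32{1, 28, 28, 3})
	return Conv2DBackpropInput(s, inputSizes, filter, outBackprop,
		[]int64{1, 1, 1, 1}, "SAME",
		Conv2DBackpropInputDataFormat("NHWC"))
}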

// Computes `exp(x) - 1` element-wise.
//
//   i.e. `exp(x) - 1` or `e^(x) - 1`, where `x` is the input tensor.
//   `e` denotes Euler's number and is approximately equal to 2.718281.
//
//   ```python
//   x = tf.constant(2.0)
//   tf.math.expm1(x) ==> 6.389056
//
//   x = tf.constant([2.0, 8.0])
//   tf.math.expm1(x) ==> array([6.389056, 2979.958], dtype=float32)
//
//   x = tf.constant(1 + 1j)
//   tf.math.expm1(x) ==> (0.46869393991588515+2.2873552871788423j)
//   ```
func Expm1(scope *Scope, x tf.Output) (y tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Expm1",
		Input: []tf.Input{
			x,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// MultinomialAttr is an optional argument to Multinomial.
type MultinomialAttr func(optionalAttr)

// MultinomialSeed sets the optional seed attribute to value.
//
// value: If either seed or seed2 is set to be non-zero, the internal random number
// generator is seeded by the given seed.  Otherwise, a random seed is used.
// If not specified, defaults to 0
func MultinomialSeed(value int64) MultinomialAttr {
	return func(m optionalAttr) {
		m["seed"] = value
	}
}

// MultinomialSeed2 sets the optional seed2 attribute to value.
//
// value: A second seed to avoid seed collision.
// If not specified, defaults to 0
func MultinomialSeed2(value int64) MultinomialAttr {
	return func(m optionalAttr) {
		m["seed2"] = value
	}
}

// MultinomialOutputDtype sets the optional output_dtype attribute to value.
// If not specified, defaults to DT_INT64
func MultinomialOutputDtype(value tf.DataType) MultinomialAttr {
	return func(m optionalAttr) {
		m["output_dtype"] = value
	}
}

// Draws samples from a multinomial distribution.
//
// Arguments:
//	logits: 2-D Tensor with shape `[batch_size, num_classes]`.  Each slice `[i, :]`
// represents the unnormalized log probabilities for all classes.
//	num_samples: 0-D.  Number of independent samples to draw for each row slice.
//
// Returns 2-D Tensor with shape `[batch_size, num_samples]`.  Each slice `[i, :]`
// contains the drawn class labels with range `[0, num_classes)`.
func Multinomial(scope *Scope, logits tf.Output, num_samples tf.Output, optional ...MultinomialAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "Multinomial",
		Input: []tf.Input{
			logits, num_samples,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
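
// Usage sketch (editor's addition): drawing reproducible samples by fixing the
// seed attribute and requesting int32 labels. `logits` and `numSamples` are
// assumed to already be in the graph; illustration only.
func exampleMultinomial(s *Scope, logits, numSamples tf.Output) tf.Output {
	// With a non-zero seed the internal generator is seeded deterministically.
	return Multinomial(s, logits, numSamples,
		MultinomialSeed(42), MultinomialOutputDtype(tf.Int32))
}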

// NonDeterministicIntsAttr is an optional argument to NonDeterministicInts.
type NonDeterministicIntsAttr func(optionalAttr)

// NonDeterministicIntsDtype sets the optional dtype attribute to value.
//
// value: The type of the output.
// If not specified, defaults to DT_INT64
func NonDeterministicIntsDtype(value tf.DataType) NonDeterministicIntsAttr {
	return func(m optionalAttr) {
		m["dtype"] = value
	}
}

// Non-deterministically generates some integers.
//
// This op may use some OS-provided source of non-determinism (e.g. an RNG), so each execution will give different results.
//
// Arguments:
//	shape: The shape of the output tensor.
//
// Returns Non-deterministic integer values with specified shape.
func NonDeterministicInts(scope *Scope, shape tf.Output, optional ...NonDeterministicIntsAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "NonDeterministicInts",
		Input: []tf.Input{
			shape,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// FakeQuantWithMinMaxVarsPerChannelAttr is an optional argument to FakeQuantWithMinMaxVarsPerChannel.
type FakeQuantWithMinMaxVarsPerChannelAttr func(optionalAttr)

// FakeQuantWithMinMaxVarsPerChannelNumBits sets the optional num_bits attribute to value.
// If not specified, defaults to 8
func FakeQuantWithMinMaxVarsPerChannelNumBits(value int64) FakeQuantWithMinMaxVarsPerChannelAttr {
	return func(m optionalAttr) {
		m["num_bits"] = value
	}
}

// FakeQuantWithMinMaxVarsPerChannelNarrowRange sets the optional narrow_range attribute to value.
// If not specified, defaults to false
func FakeQuantWithMinMaxVarsPerChannelNarrowRange(value bool) FakeQuantWithMinMaxVarsPerChannelAttr {
	return func(m optionalAttr) {
		m["narrow_range"] = value
	}
}

// Fake-quantize the 'inputs' tensor of type float via per-channel floats
//
// Fake-quantize the `inputs` tensor of type float per-channel and one of the
// shapes: `[d]`, `[b, d]`, `[b, h, w, d]` via per-channel floats `min` and `max`
// of shape `[d]` to `outputs` tensor of same shape as `inputs`.
//
// Attributes
//
// *   `[min; max]` define the clamping range for the `inputs` data.
// *   `inputs` values are quantized into the quantization range (
// `[0; 2^num_bits - 1]` when `narrow_range` is false and `[1; 2^num_bits - 1]`
// when it is true) and then de-quantized and output as floats in `[min; max]`
// interval.
// *   `num_bits` is the bitwidth of the quantization; between 2 and 16, inclusive.
//
// Before quantization, `min` and `max` values are adjusted with the following
// logic.
// It is suggested to have `min <= 0 <= max`. If `0` is not in the range of values,
// the behavior can be unexpected:
//
// *   If `0 < min < max`: `min_adj = 0` and `max_adj = max - min`.
// *   If `min < max < 0`: `min_adj = min - max` and `max_adj = 0`.
// *   If `min <= 0 <= max`: `scale = (max - min) / (2^num_bits - 1)`,
// `min_adj = scale * round(min / scale)` and `max_adj = max + min_adj - min`.
//
// This operation has a gradient and thus allows for training `min` and `max`
// values.
func FakeQuantWithMinMaxVarsPerChannel(scope *Scope, inputs tf.Output, min tf.Output, max tf.Output, optional ...FakeQuantWithMinMaxVarsPerChannelAttr) (outputs tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "FakeQuantWithMinMaxVarsPerChannel",
		Input: []tf.Input{
			inputs, min, max,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Checks whether a resource handle-based variable has been initialized.
//
// Arguments:
//	resource: the input resource handle.
//
// Returns a scalar boolean which is true if the variable has been
// initialized.
func VarIsInitializedOp(scope *Scope, resource tf.Output) (is_initialized tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "VarIsInitializedOp",
		Input: []tf.Input{
			resource,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// QueueDequeueManyV2Attr is an optional argument to QueueDequeueManyV2.
type QueueDequeueManyV2Attr func(optionalAttr)

// QueueDequeueManyV2TimeoutMs sets the optional timeout_ms attribute to value.
//
// value: If the queue has fewer than n elements, this operation
// will block for up to timeout_ms milliseconds.
// Note: This option is not supported yet.
// If not specified, defaults to -1
func QueueDequeueManyV2TimeoutMs(value int64) QueueDequeueManyV2Attr {
	return func(m optionalAttr) {
		m["timeout_ms"] = value
	}
}

// Dequeues `n` tuples of one or more tensors from the given queue.
//
// If the queue is closed and there are fewer than `n` elements, then an
// OutOfRange error is returned.
//
// This operation concatenates queue-element component tensors along the
// 0th dimension to make a single component tensor.  All of the components
// in the dequeued tuple will have size `n` in the 0th dimension.
//
// This operation has `k` outputs, where `k` is the number of components in
// the tuples stored in the given queue, and output `i` is the ith
// component of the dequeued tuple.
//
// N.B. If the queue is empty, this operation will block until `n` elements
// have been dequeued (or 'timeout_ms' elapses, if specified).
//
// Arguments:
//	handle: The handle to a queue.
//	n: The number of tuples to dequeue.
//	component_types: The type of each component in a tuple.
//
// Returns One or more tensors that were dequeued as a tuple.
func QueueDequeueManyV2(scope *Scope, handle tf.Output, n tf.Output, component_types []tf.DataType, optional ...QueueDequeueManyV2Attr) (components []tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"component_types": component_types}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "QueueDequeueManyV2",
		Input: []tf.Input{
			handle, n,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	if scope.Err() != nil {
		return
	}
	var idx int
	var err error
	if components, idx, err = makeOutputList(op, idx, "components"); err != nil {
		scope.UpdateErr("QueueDequeueManyV2", err)
		return
	}
	return components
}

// Returns x * y element-wise. Returns zero if y is zero, even if x is infinite or NaN.
//
// *NOTE*: `MulNoNan` supports broadcasting. More about broadcasting
// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
func MulNoNan(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "MulNoNan",
		Input: []tf.Input{
			x, y,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
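
// Scalar sketch (editor's addition) of the MulNoNan contract: the product is
// defined to be zero whenever y is zero, even when x is Inf or NaN, where the
// plain product would yield NaN. Illustration only.
func mulNoNanScalar(x, y float64) float64 {
	if y == 0 {
		return 0 // 0 * Inf and 0 * NaN both become 0 under this op
	}
	return x * y
}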

// StatelessRandomUniformFullIntV2Attr is an optional argument to StatelessRandomUniformFullIntV2.
type StatelessRandomUniformFullIntV2Attr func(optionalAttr)

// StatelessRandomUniformFullIntV2Dtype sets the optional dtype attribute to value.
//
// value: The type of the output.
// If not specified, defaults to DT_UINT64
func StatelessRandomUniformFullIntV2Dtype(value tf.DataType) StatelessRandomUniformFullIntV2Attr {
	return func(m optionalAttr) {
		m["dtype"] = value
	}
}

// Outputs deterministic pseudorandom random integers from a uniform distribution.
//
// The generated values are uniform integers covering the whole range of `dtype`.
//
// The outputs are a deterministic function of `shape`, `key`, `counter` and `alg`.
//
// Arguments:
//	shape: The shape of the output tensor.
//	key: Key for the counter-based RNG algorithm (shape uint64[1]).
//	counter: Initial counter for the counter-based RNG algorithm (shape uint64[2] or uint64[1] depending on the algorithm). If a larger vector is given, only the needed portion on the left (i.e. [:N]) will be used.
//	alg: The RNG algorithm (shape int32[]).
//
// Returns Random values with specified shape.
func StatelessRandomUniformFullIntV2(scope *Scope, shape tf.Output, key tf.Output, counter tf.Output, alg tf.Output, optional ...StatelessRandomUniformFullIntV2Attr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "StatelessRandomUniformFullIntV2",
		Input: []tf.Input{
			shape, key, counter, alg,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Returns a batched diagonal tensor with given batched diagonal values.
//
// Given a `diagonal`, this operation returns a tensor with the `diagonal` and
// everything else padded with zeros. The diagonal is computed as follows:
//
// Assume `diagonal` has `k` dimensions `[I, J, K, ..., N]`, then the output is a
// tensor of rank `k+1` with dimensions `[I, J, K, ..., N, N]` where:
//
// `output[i, j, k, ..., m, n] = 1{m=n} * diagonal[i, j, k, ..., n]`.
//
// For example:
//
// ```
// # 'diagonal' is [[1, 2, 3, 4], [5, 6, 7, 8]]
//
// and diagonal.shape = (2, 4)
//
// tf.matrix_diag(diagonal) ==> [[[1, 0, 0, 0]
//                                      [0, 2, 0, 0]
//                                      [0, 0, 3, 0]
//                                      [0, 0, 0, 4]],
//                                     [[5, 0, 0, 0]
//                                      [0, 6, 0, 0]
//                                      [0, 0, 7, 0]
//                                      [0, 0, 0, 8]]]
//
// which has shape (2, 4, 4)
// ```
//
// Arguments:
//	diagonal: Rank `k`, where `k >= 1`.
//
// Returns Rank `k+1`, with `output.shape = diagonal.shape + [diagonal.shape[-1]]`.
func MatrixDiag(scope *Scope, diagonal tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "MatrixDiag",
		Input: []tf.Input{
			diagonal,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// StatelessTruncatedNormalAttr is an optional argument to StatelessTruncatedNormal.
type StatelessTruncatedNormalAttr func(optionalAttr)

// StatelessTruncatedNormalDtype sets the optional dtype attribute to value.
//
// value: The type of the output.
// If not specified, defaults to DT_FLOAT
func StatelessTruncatedNormalDtype(value tf.DataType) StatelessTruncatedNormalAttr {
	return func(m optionalAttr) {
		m["dtype"] = value
	}
}

// Outputs deterministic pseudorandom values from a truncated normal distribution.
//
// The generated values follow a normal distribution with mean 0 and standard
// deviation 1, except that values whose magnitude is more than 2 standard
// deviations from the mean are dropped and re-picked.
//
// The outputs are a deterministic function of `shape` and `seed`.
//
// Arguments:
//	shape: The shape of the output tensor.
//	seed: 2 seeds (shape [2]).
//
// Returns Random values with specified shape.
func StatelessTruncatedNormal(scope *Scope, shape tf.Output, seed tf.Output, optional ...StatelessTruncatedNormalAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "StatelessTruncatedNormal",
		Input: []tf.Input{
			shape, seed,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// BiasAddGradAttr is an optional argument to BiasAddGrad.
type BiasAddGradAttr func(optionalAttr)

// BiasAddGradDataFormat sets the optional data_format attribute to value.
//
// value: Specify the data format of the input and output data. With the
// default format "NHWC", the bias tensor will be added to the last dimension
// of the value tensor.
// Alternatively, the format could be "NCHW", the data storage order of:
//     [batch, in_channels, in_height, in_width].
// The tensor will be added to "in_channels", the third-to-the-last dimension.
// If not specified, defaults to "NHWC"
func BiasAddGradDataFormat(value string) BiasAddGradAttr {
	return func(m optionalAttr) {
		m["data_format"] = value
	}
}

// The backward operation for "BiasAdd" on the "bias" tensor.
//
// It accumulates all the values from out_backprop into the feature dimension.
// For NHWC data format, the feature dimension is the last. For NCHW data format,
// the feature dimension is the third-to-last.
//
// Arguments:
//	out_backprop: Any number of dimensions.
//
// Returns 1-D with size the feature dimension of `out_backprop`.
func BiasAddGrad(scope *Scope, out_backprop tf.Output, optional ...BiasAddGradAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "BiasAddGrad",
		Input: []tf.Input{
			out_backprop,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Op that executes a program with optional in-place variable updates.
//
// It (optionally) reads device variables, loads and executes a TPU program on a
// TPU device, and then (optionally) in-place updates variables using the program
// outputs, as specified in attributes device_var_reads_indices (program input
// indices from directly reading variables) and device_var_updates_indices (program
// output indices used to update variables, -1 means no-update/read-only). Program
// outputs consumed by these variables will not appear in the op output. For the
// internal use of the distributed TPU compiler.
func TPUExecuteAndUpdateVariables(scope *Scope, args []tf.Output, key tf.Output, Tresults []tf.DataType, device_var_reads_indices []int64, device_var_updates_indices []int64) (results []tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"Tresults": Tresults, "device_var_reads_indices": device_var_reads_indices, "device_var_updates_indices": device_var_updates_indices}
	opspec := tf.OpSpec{
		Type: "TPUExecuteAndUpdateVariables",
		Input: []tf.Input{
			tf.OutputList(args), key,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	if scope.Err() != nil {
		return
	}
	var idx int
	var err error
	if results, idx, err = makeOutputList(op, idx, "results"); err != nil {
		scope.UpdateErr("TPUExecuteAndUpdateVariables", err)
		return
	}
	return results
}

// TensorArrayV2Attr is an optional argument to TensorArrayV2.
type TensorArrayV2Attr func(optionalAttr)

// TensorArrayV2ElementShape sets the optional element_shape attribute to value.
// If not specified, defaults to <unknown_rank:true >
func TensorArrayV2ElementShape(value tf.Shape) TensorArrayV2Attr {
	return func(m optionalAttr) {
		m["element_shape"] = value
	}
}

// TensorArrayV2DynamicSize sets the optional dynamic_size attribute to value.
// If not specified, defaults to false
func TensorArrayV2DynamicSize(value bool) TensorArrayV2Attr {
	return func(m optionalAttr) {
		m["dynamic_size"] = value
	}
}

// TensorArrayV2ClearAfterRead sets the optional clear_after_read attribute to value.
// If not specified, defaults to true
func TensorArrayV2ClearAfterRead(value bool) TensorArrayV2Attr {
	return func(m optionalAttr) {
		m["clear_after_read"] = value
	}
}

// TensorArrayV2TensorArrayName sets the optional tensor_array_name attribute to value.
// If not specified, defaults to ""
func TensorArrayV2TensorArrayName(value string) TensorArrayV2Attr {
	return func(m optionalAttr) {
		m["tensor_array_name"] = value
	}
}

// Deprecated. Use TensorArrayV3
//
// DEPRECATED at GraphDef version 26: Use TensorArrayV3
func TensorArrayV2(scope *Scope, size tf.Output, dtype tf.DataType, optional ...TensorArrayV2Attr) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"dtype": dtype}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "TensorArrayV2",
		Input: []tf.Input{
			size,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// FakeQuantWithMinMaxVarsPerChannelGradientAttr is an optional argument to FakeQuantWithMinMaxVarsPerChannelGradient.
type FakeQuantWithMinMaxVarsPerChannelGradientAttr func(optionalAttr)

// FakeQuantWithMinMaxVarsPerChannelGradientNumBits sets the optional num_bits attribute to value.
//
// value: The bitwidth of the quantization; between 2 and 16, inclusive.
// If not specified, defaults to 8
func FakeQuantWithMinMaxVarsPerChannelGradientNumBits(value int64) FakeQuantWithMinMaxVarsPerChannelGradientAttr {
	return func(m optionalAttr) {
		m["num_bits"] = value
	}
}

// FakeQuantWithMinMaxVarsPerChannelGradientNarrowRange sets the optional narrow_range attribute to value.
//
// value: Whether to quantize into 2^num_bits - 1 distinct values.
// If not specified, defaults to false
func FakeQuantWithMinMaxVarsPerChannelGradientNarrowRange(value bool) FakeQuantWithMinMaxVarsPerChannelGradientAttr {
	return func(m optionalAttr) {
		m["narrow_range"] = value
	}
}

// Compute gradients for a FakeQuantWithMinMaxVarsPerChannel operation.
//
// Arguments:
//	gradients: Backpropagated gradients above the FakeQuantWithMinMaxVars operation,
// shape one of: `[d]`, `[b, d]`, `[b, h, w, d]`.
//	inputs: Values passed as inputs to the FakeQuantWithMinMaxVars operation, shape
//   same as `gradients`.
// min, max: Quantization interval, floats of shape `[d]`.
//
//
//
// Returns:
//	backprops_wrt_input: Backpropagated gradients w.r.t. inputs, shape same as
// `inputs`:
//   `gradients * (inputs >= min && inputs <= max)`.
//	backprop_wrt_min: Backpropagated gradients w.r.t. min parameter, shape `[d]`:
// `sum_per_d(gradients * (inputs < min))`.
//	backprop_wrt_max: Backpropagated gradients w.r.t. max parameter, shape `[d]`:
// `sum_per_d(gradients * (inputs > max))`.
func FakeQuantWithMinMaxVarsPerChannelGradient(scope *Scope, gradients tf.Output, inputs tf.Output, min tf.Output, max tf.Output, optional ...FakeQuantWithMinMaxVarsPerChannelGradientAttr) (backprops_wrt_input tf.Output, backprop_wrt_min tf.Output, backprop_wrt_max tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "FakeQuantWithMinMaxVarsPerChannelGradient",
		Input: []tf.Input{
			gradients, inputs, min, max,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}

// PrintV2Attr is an optional argument to PrintV2.
type PrintV2Attr func(optionalAttr)

// PrintV2OutputStream sets the optional output_stream attribute to value.
//
// value: A string specifying the output stream or logging level to print to.
// If not specified, defaults to "stderr"
func PrintV2OutputStream(value string) PrintV2Attr {
	return func(m optionalAttr) {
		m["output_stream"] = value
	}
}

// PrintV2End sets the optional end attribute to value.
// If not specified, defaults to "\n"
func PrintV2End(value string) PrintV2Attr {
	return func(m optionalAttr) {
		m["end"] = value
	}
}

// Prints a string scalar.
//
// Prints a string scalar to the desired output_stream.
//
// Arguments:
//	input: The string scalar to print.
//
// Returns the created operation.
func PrintV2(scope *Scope, input tf.Output, optional ...PrintV2Attr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "PrintV2",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}
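
// Usage sketch (editor's addition): printing a string scalar to stdout instead
// of the default stderr. `msg` is assumed to be a string scalar already in the
// graph; illustration only.
func examplePrintV2(s *Scope, msg tf.Output) *tf.Operation {
	return PrintV2(s, msg, PrintV2OutputStream("stdout"))
}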

// Transforms a serialized tensorflow.TensorProto proto into a Tensor.
//
// Arguments:
//	serialized: A scalar string containing a serialized TensorProto proto.
//	out_type: The type of the serialized tensor.  The provided type must match the
// type of the serialized tensor and no implicit conversion will take place.
//
// Returns A Tensor of type `out_type`.
func ParseTensor(scope *Scope, serialized tf.Output, out_type tf.DataType) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"out_type": out_type}
	opspec := tf.OpSpec{
		Type: "ParseTensor",
		Input: []tf.Input{
			serialized,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// LowerBoundAttr is an optional argument to LowerBound.
type LowerBoundAttr func(optionalAttr)

// LowerBoundOutType sets the optional out_type attribute to value.
// If not specified, defaults to DT_INT32
func LowerBoundOutType(value tf.DataType) LowerBoundAttr {
	return func(m optionalAttr) {
		m["out_type"] = value
	}
}

// Applies lower_bound(sorted_search_values, values) along each row.
//
// Each set of rows with the same index in (sorted_inputs, values) is treated
// independently.  The resulting row is the equivalent of calling
// `np.searchsorted(sorted_inputs, values, side='left')`.
//
// The result is not a global index to the entire
// `Tensor`, but rather just the index in the last dimension.
//
// A 2-D example:
//   sorted_sequence = [[0, 3, 9, 9, 10],
//                      [1, 2, 3, 4, 5]]
//   values = [[2, 4, 9],
//             [0, 2, 6]]
//
//   result = LowerBound(sorted_sequence, values)
//
//   result == [[1, 2, 2],
//              [0, 1, 5]]
//
// Arguments:
//	sorted_inputs: 2-D Tensor where each row is ordered.
//	values: 2-D Tensor with the same numbers of rows as `sorted_search_values`. Contains
// the values that will be searched for in `sorted_search_values`.
//
// Returns A `Tensor` with the same shape as `values`.  It contains the first scalar index
// into the last dimension where values can be inserted without changing the
// ordered property.
func LowerBound(scope *Scope, sorted_inputs tf.Output, values tf.Output, optional ...LowerBoundAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "LowerBound",
		Input: []tf.Input{
			sorted_inputs, values,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
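
// A pure-Go sketch (editor's addition) of the per-row semantics above: for
// each value, find the first index in the sorted row where it could be
// inserted while keeping the row ordered (np.searchsorted side='left').
// Uses the standard "sort" package, which would need to be imported at the
// top of the file. With the documented example,
// lowerBoundRow([]int{0, 3, 9, 9, 10}, []int{2, 4, 9}) yields [1, 2, 2].
func lowerBoundRow(sortedRow, values []int) []int {
	out := make([]int, len(values))
	for i, v := range values {
		// sort.Search returns the smallest index at which the predicate holds.
		out[i] = sort.Search(len(sortedRow), func(j int) bool { return sortedRow[j] >= v })
	}
	return out
}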

// Returns the truth value of (x > y) element-wise.
//
// *NOTE*: `Greater` supports broadcasting. More about broadcasting
// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
//
// Example:
//
// ```python
// x = tf.constant([5, 4, 6])
// y = tf.constant([5, 2, 5])
// tf.math.greater(x, y) ==> [False, True, True]
//
// x = tf.constant([5, 4, 6])
// y = tf.constant([5])
// tf.math.greater(x, y) ==> [False, False, True]
// ```
func Greater(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Greater",
		Input: []tf.Input{
			x, y,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// StatelessRandomUniformFullIntAttr is an optional argument to StatelessRandomUniformFullInt.
type StatelessRandomUniformFullIntAttr func(optionalAttr)

// StatelessRandomUniformFullIntDtype sets the optional dtype attribute to value.
//
// value: The type of the output.
// If not specified, defaults to DT_UINT64
func StatelessRandomUniformFullIntDtype(value tf.DataType) StatelessRandomUniformFullIntAttr {
	return func(m optionalAttr) {
		m["dtype"] = value
	}
}

// Outputs deterministic pseudorandom random integers from a uniform distribution.
//
// The generated values are uniform integers covering the whole range of `dtype`.
//
// The outputs are a deterministic function of `shape` and `seed`.
//
// Arguments:
//	shape: The shape of the output tensor.
//	seed: 2 seeds (shape [2]).
//
// Returns Random values with specified shape.
func StatelessRandomUniformFullInt(scope *Scope, shape tf.Output, seed tf.Output, optional ...StatelessRandomUniformFullIntAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "StatelessRandomUniformFullInt",
		Input: []tf.Input{
			shape, seed,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Returns element-wise remainder of division. This emulates C semantics in that
//
// the result here is consistent with a truncating divide. E.g. `truncate(x / y) *
// y + truncate_mod(x, y) = x`.
//
// *NOTE*: `TruncateMod` supports broadcasting. More about broadcasting
// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
func TruncateMod(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "TruncateMod",
		Input: []tf.Input{
			x, y,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
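
// A tiny check (editor's addition) of the C-style identity quoted above:
// trunc(x/y)*y + truncate_mod(x, y) reconstructs x, up to floating-point
// rounding. Go's math.Mod also truncates toward zero, so it models this op for
// scalars. Requires importing "math" at the top of the file; illustration only.
func truncateModIdentity(x, y float64) (reconstructed, original float64) {
	return math.Trunc(x/y)*y + math.Mod(x, y), x
}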

// Calculates the gradient of the SparseMatrixSoftmax op.
//
// Arguments:
//	softmax: A CSRSparseMatrix.
//	grad_softmax: The gradient of `softmax`.
//
//
// Returns The output gradient.
func SparseMatrixSoftmaxGrad(scope *Scope, softmax tf.Output, grad_softmax tf.Output, type_ tf.DataType) (gradient tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"type": type_}
	opspec := tf.OpSpec{
		Type: "SparseMatrixSoftmaxGrad",
		Input: []tf.Input{
			softmax, grad_softmax,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Computes the GRU cell back-propagation for 1 time step.
//
// Args
//     x: Input to the GRU cell.
//     h_prev: State input from the previous GRU cell.
//     w_ru: Weight matrix for the reset and update gate.
//     w_c: Weight matrix for the cell connection gate.
//     b_ru: Bias vector for the reset and update gate.
//     b_c: Bias vector for the cell connection gate.
//     r: Output of the reset gate.
//     u: Output of the update gate.
//     c: Output of the cell connection gate.
//     d_h: Gradients of h_new w.r.t. the objective function.
//
// Returns
//     d_x: Gradients of x w.r.t. the objective function.
//     d_h_prev: Gradients of h w.r.t. the objective function.
//     d_c_bar: Gradients of c_bar w.r.t. the objective function.
//     d_r_bar_u_bar: Gradients of r_bar & u_bar w.r.t. the objective function.
//
// This kernel op implements the following mathematical equations:
//
// Note on notation of the variables:
//
// Concatenation of a and b is represented by a_b
// Element-wise dot product of a and b is represented by ab
// Element-wise dot product is represented by \circ
// Matrix multiplication is represented by *
//
// Additional notes for clarity:
//
// `w_ru` can be segmented into 4 different matrices.
// ```
// w_ru = [w_r_x w_u_x
//         w_r_h_prev w_u_h_prev]
// ```
// Similarly, `w_c` can be segmented into 2 different matrices.
// ```
// w_c = [w_c_x w_c_h_prevr]
// ```
// Same goes for biases.
// ```
// b_ru = [b_ru_x b_ru_h]
// b_c = [b_c_x b_c_h]
// ```
// Another note on notation:
// ```
// d_x = d_x_component_1 + d_x_component_2
//
// where d_x_component_1 = d_r_bar * w_r_x^T + d_u_bar * w_u_x^T
// and d_x_component_2 = d_c_bar * w_c_x^T
//
// d_h_prev = d_h_prev_component_1 + d_h_prevr \circ r + d_h \circ u
// where d_h_prev_component_1 = d_r_bar * w_r_h_prev^T + d_u_bar * w_r_h_prev^T
// ```
//
// Mathematics behind the Gradients below:
// ```
// d_c_bar = d_h \circ (1-u) \circ (1-c \circ c)
// d_u_bar = d_h \circ (h-c) \circ u \circ (1-u)
//
// d_r_bar_u_bar = [d_r_bar d_u_bar]
//
// [d_x_component_1 d_h_prev_component_1] = d_r_bar_u_bar * w_ru^T
//
// [d_x_component_2 d_h_prevr] = d_c_bar * w_c^T
//
// d_x = d_x_component_1 + d_x_component_2
//
// d_h_prev = d_h_prev_component_1 + d_h_prevr \circ r + d_h \circ u
// ```
// The calculation below is performed in the Python wrapper for the gradients
// (not in the gradient kernel):
// ```
// d_w_ru = x_h_prev^T * d_r_bar_u_bar
//
// d_w_c = x_h_prevr^T * d_c_bar
//
// d_b_ru = sum of d_r_bar_u_bar along axis = 0
//
// d_b_c = sum of d_c_bar along axis = 0
// ```
func GRUBlockCellGrad(scope *Scope, x tf.Output, h_prev tf.Output, w_ru tf.Output, w_c tf.Output, b_ru tf.Output, b_c tf.Output, r tf.Output, u tf.Output, c tf.Output, d_h tf.Output) (d_x tf.Output, d_h_prev tf.Output, d_c_bar tf.Output, d_r_bar_u_bar tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "GRUBlockCellGrad",
		Input: []tf.Input{
			x, h_prev, w_ru, w_c, b_ru, b_c, r, u, c, d_h,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2), op.Output(3)
}

// FractionalMaxPoolAttr is an optional argument to FractionalMaxPool.
type FractionalMaxPoolAttr func(optionalAttr)

// FractionalMaxPoolPseudoRandom sets the optional pseudo_random attribute to value.
//
// value: When set to True, generates the pooling sequence in a
// pseudorandom fashion, otherwise, in a random fashion. Check paper [Benjamin
// Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071) for the
// difference between pseudorandom and random.
// If not specified, defaults to false
func FractionalMaxPoolPseudoRandom(value bool) FractionalMaxPoolAttr {
	return func(m optionalAttr) {
		m["pseudo_random"] = value
	}
}

// FractionalMaxPoolOverlapping sets the optional overlapping attribute to value.
//
// value: When set to True, it means when pooling, the values at the boundary
// of adjacent pooling cells are used by both cells. For example:
//
// `index  0  1  2  3  4`
//
// `value  20 5  16 3  7`
//
// If the pooling sequence is [0, 2, 4], then 16, at index 2, will be used twice.
// The result would be [20, 16] for fractional max pooling.
// If not specified, defaults to false
func FractionalMaxPoolOverlapping(value bool) FractionalMaxPoolAttr {
	return func(m optionalAttr) {
		m["overlapping"] = value
	}
}

// FractionalMaxPoolDeterministic sets the optional deterministic attribute to value.
//
// value: When set to True, a fixed pooling region will be used when
// iterating over a FractionalMaxPool node in the computation graph. Mainly used
// in unit tests to make FractionalMaxPool deterministic.
// If not specified, defaults to false
func FractionalMaxPoolDeterministic(value bool) FractionalMaxPoolAttr {
	return func(m optionalAttr) {
		m["deterministic"] = value
	}
}

// FractionalMaxPoolSeed sets the optional seed attribute to value.
//
// value: If either seed or seed2 is set to be non-zero, the random number
// generator is seeded by the given seed.  Otherwise, it is seeded by a
// random seed.
// If not specified, defaults to 0
func FractionalMaxPoolSeed(value int64) FractionalMaxPoolAttr {
	return func(m optionalAttr) {
		m["seed"] = value
	}
}

// FractionalMaxPoolSeed2 sets the optional seed2 attribute to value.
//
// value: A second seed to avoid seed collision.
// If not specified, defaults to 0
func FractionalMaxPoolSeed2(value int64) FractionalMaxPoolAttr {
	return func(m optionalAttr) {
		m["seed2"] = value
	}
}

// Performs fractional max pooling on the input.
//
// Fractional max pooling is slightly different from regular max pooling.  In
// regular max pooling, you downsize an input set by taking the maximum value of
// smaller N x N subsections of the set (often 2x2), and try to reduce the set by
// a factor of N, where N is an integer.  Fractional max pooling, as you might
// expect from the word "fractional", means that the overall reduction ratio N
// does not have to be an integer.
//
// The sizes of the pooling regions are generated randomly but are fairly uniform.
// For example, let's look at the height dimension, and the constraints on the
// list of rows that will be pool boundaries.
//
// First we define the following:
//
// 1.  input_row_length : the number of rows from the input set
// 2.  output_row_length : which will be smaller than the input
// 3.  alpha = input_row_length / output_row_length : our reduction ratio
// 4.  K = floor(alpha)
// 5.  row_pooling_sequence : this is the result list of pool boundary rows
//
// Then, row_pooling_sequence should satisfy:
//
// 1.  a[0] = 0 : the first value of the sequence is 0
// 2.  a[end] = input_row_length : the last value of the sequence is the size
// 3.  K <= (a[i+1] - a[i]) <= K+1 : all intervals are K or K+1 size
// 4.  length(row_pooling_sequence) = output_row_length+1
//
// For more details on fractional max pooling, see this paper:
// [Benjamin Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071)
//
// Arguments:
//	value: 4-D with shape `[batch, height, width, channels]`.
//	pooling_ratio: Pooling ratio for each dimension of `value`, currently only
// supports row and col dimension and should be >= 1.0. For example, a valid
// pooling ratio looks like [1.0, 1.44, 1.73, 1.0]. The first and last elements
// must be 1.0 because we don't allow pooling on batch and channels
// dimensions. 1.44 and 1.73 are pooling ratio on height and width dimensions
// respectively.
//
// Returns:
//	output: output tensor after fractional max pooling.
//	row_pooling_sequence: row pooling sequence, needed to calculate gradient.
//	col_pooling_sequence: column pooling sequence, needed to calculate gradient.
func FractionalMaxPool(scope *Scope, value tf.Output, pooling_ratio []float32, optional ...FractionalMaxPoolAttr) (output tf.Output, row_pooling_sequence tf.Output, col_pooling_sequence tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"pooling_ratio": pooling_ratio}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "FractionalMaxPool",
		Input: []tf.Input{
			value,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}
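
// A sketch (editor's addition) of one valid row_pooling_sequence satisfying
// the four constraints above, using a[i] = floor(i * inputLen / outputLen).
// The op draws its boundaries (pseudo)randomly; this deterministic variant is
// for illustration only.
func poolingBoundaries(inputLen, outputLen int) []int {
	seq := make([]int, outputLen+1)
	for i := range seq {
		// Intervals come out as K or K+1 where K = inputLen/outputLen (floored).
		seq[i] = i * inputLen / outputLen
	}
	return seq // seq[0] == 0 and seq[outputLen] == inputLen, as required
}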
30706
30707// Computes the reciprocal of x element-wise.
30708//
30709// I.e., \\(y = 1 / x\\).
30710func Reciprocal(scope *Scope, x tf.Output) (y tf.Output) {
30711	if scope.Err() != nil {
30712		return
30713	}
30714	opspec := tf.OpSpec{
30715		Type: "Reciprocal",
30716		Input: []tf.Input{
30717			x,
30718		},
30719	}
30720	op := scope.AddOperation(opspec)
30721	return op.Output(0)
30722}
30723
30724// LoadTPUEmbeddingAdagradParametersGradAccumDebugAttr is an optional argument to LoadTPUEmbeddingAdagradParametersGradAccumDebug.
30725type LoadTPUEmbeddingAdagradParametersGradAccumDebugAttr func(optionalAttr)
30726
30727// LoadTPUEmbeddingAdagradParametersGradAccumDebugTableId sets the optional table_id attribute to value.
30728// If not specified, defaults to -1
30729func LoadTPUEmbeddingAdagradParametersGradAccumDebugTableId(value int64) LoadTPUEmbeddingAdagradParametersGradAccumDebugAttr {
30730	return func(m optionalAttr) {
30731		m["table_id"] = value
30732	}
30733}
30734
30735// LoadTPUEmbeddingAdagradParametersGradAccumDebugTableName sets the optional table_name attribute to value.
30736// If not specified, defaults to ""
30737func LoadTPUEmbeddingAdagradParametersGradAccumDebugTableName(value string) LoadTPUEmbeddingAdagradParametersGradAccumDebugAttr {
30738	return func(m optionalAttr) {
30739		m["table_name"] = value
30740	}
30741}
30742
30743// LoadTPUEmbeddingAdagradParametersGradAccumDebugConfig sets the optional config attribute to value.
30744// If not specified, defaults to ""
30745func LoadTPUEmbeddingAdagradParametersGradAccumDebugConfig(value string) LoadTPUEmbeddingAdagradParametersGradAccumDebugAttr {
30746	return func(m optionalAttr) {
30747		m["config"] = value
30748	}
30749}
30750
30751// Load Adagrad embedding parameters with debug support.
30752//
30753// An op that loads optimization parameters into HBM for embedding. Must be
30754// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
30755// embedding table configuration. For example, this op is used to install
30756// parameters that are loaded from a checkpoint before a training loop is
30757// executed.
30758//
30759// Arguments:
30760//	parameters: Value of parameters used in the Adagrad optimization algorithm.
30761//	accumulators: Value of accumulators used in the Adagrad optimization algorithm.
30762//	gradient_accumulators: Value of gradient_accumulators used in the Adagrad optimization algorithm.
30763//
30764//
30765//
30766// Returns the created operation.
30767func LoadTPUEmbeddingAdagradParametersGradAccumDebug(scope *Scope, parameters tf.Output, accumulators tf.Output, gradient_accumulators tf.Output, num_shards int64, shard_id int64, optional ...LoadTPUEmbeddingAdagradParametersGradAccumDebugAttr) (o *tf.Operation) {
30768	if scope.Err() != nil {
30769		return
30770	}
30771	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
30772	for _, a := range optional {
30773		a(attrs)
30774	}
30775	opspec := tf.OpSpec{
30776		Type: "LoadTPUEmbeddingAdagradParametersGradAccumDebug",
30777		Input: []tf.Input{
30778			parameters, accumulators, gradient_accumulators,
30779		},
30780		Attrs: attrs,
30781	}
30782	return scope.AddOperation(opspec)
30783}
30784
30785// MapPeekAttr is an optional argument to MapPeek.
30786type MapPeekAttr func(optionalAttr)
30787
30788// MapPeekCapacity sets the optional capacity attribute to value.
30789// If not specified, defaults to 0
30790//
30791// REQUIRES: value >= 0
30792func MapPeekCapacity(value int64) MapPeekAttr {
30793	return func(m optionalAttr) {
30794		m["capacity"] = value
30795	}
30796}
30797
30798// MapPeekMemoryLimit sets the optional memory_limit attribute to value.
30799// If not specified, defaults to 0
30800//
30801// REQUIRES: value >= 0
30802func MapPeekMemoryLimit(value int64) MapPeekAttr {
30803	return func(m optionalAttr) {
30804		m["memory_limit"] = value
30805	}
30806}
30807
30808// MapPeekContainer sets the optional container attribute to value.
30809// If not specified, defaults to ""
30810func MapPeekContainer(value string) MapPeekAttr {
30811	return func(m optionalAttr) {
30812		m["container"] = value
30813	}
30814}
30815
30816// MapPeekSharedName sets the optional shared_name attribute to value.
30817// If not specified, defaults to ""
30818func MapPeekSharedName(value string) MapPeekAttr {
30819	return func(m optionalAttr) {
30820		m["shared_name"] = value
30821	}
30822}
30823
30824// Op peeks at the values at the specified key.
30825//
30826// If the underlying container does not contain this key,
30827// this op will block until it does.
30828func MapPeek(scope *Scope, key tf.Output, indices tf.Output, dtypes []tf.DataType, optional ...MapPeekAttr) (values []tf.Output) {
30829	if scope.Err() != nil {
30830		return
30831	}
30832	attrs := map[string]interface{}{"dtypes": dtypes}
30833	for _, a := range optional {
30834		a(attrs)
30835	}
30836	opspec := tf.OpSpec{
30837		Type: "MapPeek",
30838		Input: []tf.Input{
30839			key, indices,
30840		},
30841		Attrs: attrs,
30842	}
30843	op := scope.AddOperation(opspec)
30844	if scope.Err() != nil {
30845		return
30846	}
30847	var idx int
30848	var err error
30849	if values, idx, err = makeOutputList(op, idx, "values"); err != nil {
30850		scope.UpdateErr("MapPeek", err)
30851		return
30852	}
30853	return values
30854}
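
// A minimal usage sketch (illustrative; the key, indices and dtypes are
// assumptions, and MapPeek will block until the underlying container holds
// the key):
//
//	s := op.NewScope()
//	key := op.Const(s, int64(42))
//	indices := op.Const(s, []int32{0})
//	values := op.MapPeek(s, key, indices, []tf.DataType{tf.Float})
//	_ = values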
30855
30856// RetrieveTPUEmbeddingCenteredRMSPropParametersAttr is an optional argument to RetrieveTPUEmbeddingCenteredRMSPropParameters.
30857type RetrieveTPUEmbeddingCenteredRMSPropParametersAttr func(optionalAttr)
30858
30859// RetrieveTPUEmbeddingCenteredRMSPropParametersTableId sets the optional table_id attribute to value.
30860// If not specified, defaults to -1
30861func RetrieveTPUEmbeddingCenteredRMSPropParametersTableId(value int64) RetrieveTPUEmbeddingCenteredRMSPropParametersAttr {
30862	return func(m optionalAttr) {
30863		m["table_id"] = value
30864	}
30865}
30866
30867// RetrieveTPUEmbeddingCenteredRMSPropParametersTableName sets the optional table_name attribute to value.
30868// If not specified, defaults to ""
30869func RetrieveTPUEmbeddingCenteredRMSPropParametersTableName(value string) RetrieveTPUEmbeddingCenteredRMSPropParametersAttr {
30870	return func(m optionalAttr) {
30871		m["table_name"] = value
30872	}
30873}
30874
30875// RetrieveTPUEmbeddingCenteredRMSPropParametersConfig sets the optional config attribute to value.
30876// If not specified, defaults to ""
30877func RetrieveTPUEmbeddingCenteredRMSPropParametersConfig(value string) RetrieveTPUEmbeddingCenteredRMSPropParametersAttr {
30878	return func(m optionalAttr) {
30879		m["config"] = value
30880	}
30881}
30882
30883// Retrieve centered RMSProp embedding parameters.
30884//
30885// An op that retrieves optimization parameters from embedding to host
30886// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
30887// the correct embedding table configuration. For example, this op is
30888// used to retrieve updated parameters before saving a checkpoint.
30889//
30890// Returns:
30891//	parameters: Parameter parameters updated by the centered RMSProp optimization algorithm.
30892//	ms: Parameter ms updated by the centered RMSProp optimization algorithm.
30893//	mom: Parameter mom updated by the centered RMSProp optimization algorithm.
30894//	mg: Parameter mg updated by the centered RMSProp optimization algorithm.
30895func RetrieveTPUEmbeddingCenteredRMSPropParameters(scope *Scope, num_shards int64, shard_id int64, optional ...RetrieveTPUEmbeddingCenteredRMSPropParametersAttr) (parameters tf.Output, ms tf.Output, mom tf.Output, mg tf.Output) {
30896	if scope.Err() != nil {
30897		return
30898	}
30899	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
30900	for _, a := range optional {
30901		a(attrs)
30902	}
30903	opspec := tf.OpSpec{
30904		Type: "RetrieveTPUEmbeddingCenteredRMSPropParameters",
30905
30906		Attrs: attrs,
30907	}
30908	op := scope.AddOperation(opspec)
30909	return op.Output(0), op.Output(1), op.Output(2), op.Output(3)
30910}
30911
30912// Returns x + y element-wise.
30913//
30914// *NOTE*: `RiscAdd` does not support broadcasting.
30915//
30916// Given two input tensors, the `tf.risc_add` operation computes the element-wise sum of the two tensors.
30917//
30918// Both input and output have a range `(-inf, inf)`.
30919//
30920func RiscAdd(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
30921	if scope.Err() != nil {
30922		return
30923	}
30924	opspec := tf.OpSpec{
30925		Type: "RiscAdd",
30926		Input: []tf.Input{
30927			x, y,
30928		},
30929	}
30930	op := scope.AddOperation(opspec)
30931	return op.Output(0)
30932}
30933
30934// QuantizedMatMulAttr is an optional argument to QuantizedMatMul.
30935type QuantizedMatMulAttr func(optionalAttr)
30936
30937// QuantizedMatMulToutput sets the optional Toutput attribute to value.
30938// If not specified, defaults to DT_QINT32
30939func QuantizedMatMulToutput(value tf.DataType) QuantizedMatMulAttr {
30940	return func(m optionalAttr) {
30941		m["Toutput"] = value
30942	}
30943}
30944
30945// QuantizedMatMulTransposeA sets the optional transpose_a attribute to value.
30946//
30947// value: If true, `a` is transposed before multiplication.
30948// If not specified, defaults to false
30949func QuantizedMatMulTransposeA(value bool) QuantizedMatMulAttr {
30950	return func(m optionalAttr) {
30951		m["transpose_a"] = value
30952	}
30953}
30954
30955// QuantizedMatMulTransposeB sets the optional transpose_b attribute to value.
30956//
30957// value: If true, `b` is transposed before multiplication.
30958// If not specified, defaults to false
30959func QuantizedMatMulTransposeB(value bool) QuantizedMatMulAttr {
30960	return func(m optionalAttr) {
30961		m["transpose_b"] = value
30962	}
30963}
30964
30965// QuantizedMatMulTactivation sets the optional Tactivation attribute to value.
30966//
30967// value: The type of output produced by activation function
30968// following this operation.
30969// If not specified, defaults to DT_QUINT8
30970func QuantizedMatMulTactivation(value tf.DataType) QuantizedMatMulAttr {
30971	return func(m optionalAttr) {
30972		m["Tactivation"] = value
30973	}
30974}
30975
30976// Perform a quantized matrix multiplication of `a` by the matrix `b`.
30977//
30978// The inputs must be two-dimensional matrices and the inner dimension of
30979// `a` (after being transposed if `transpose_a` is non-zero) must match the
30980// outer dimension of `b` (after being transposed if `transpose_b` is
30981// non-zero).
30982//
30983// Arguments:
30984//	a: Must be a two-dimensional tensor.
30985//	b: Must be a two-dimensional tensor.
30986//	min_a: The float value that the lowest quantized `a` value represents.
30987//	max_a: The float value that the highest quantized `a` value represents.
30988//	min_b: The float value that the lowest quantized `b` value represents.
30989//	max_b: The float value that the highest quantized `b` value represents.
30990//
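// For example (illustrative numbers, assuming the usual linear quantization
// scheme): with `min_a = -1.0` and `max_a = 1.0` on a `quint8` input,
// quantized value 0 represents -1.0, 255 represents 1.0, and 128 represents
// approximately 0.0.
//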
30991// Returns:
30992//	out
30993//	min_out: The float value that the lowest quantized output value represents.
30994//	max_out: The float value that the highest quantized output value represents.
30995func QuantizedMatMul(scope *Scope, a tf.Output, b tf.Output, min_a tf.Output, max_a tf.Output, min_b tf.Output, max_b tf.Output, optional ...QuantizedMatMulAttr) (out tf.Output, min_out tf.Output, max_out tf.Output) {
30996	if scope.Err() != nil {
30997		return
30998	}
30999	attrs := map[string]interface{}{}
31000	for _, a := range optional {
31001		a(attrs)
31002	}
31003	opspec := tf.OpSpec{
31004		Type: "QuantizedMatMul",
31005		Input: []tf.Input{
31006			a, b, min_a, max_a, min_b, max_b,
31007		},
31008		Attrs: attrs,
31009	}
31010	op := scope.AddOperation(opspec)
31011	return op.Output(0), op.Output(1), op.Output(2)
31012}
31013
31014// Extract `patches` from `images` and put them in the "depth" output dimension.
31015//
31016// Arguments:
31017//	images: 4-D Tensor with shape `[batch, in_rows, in_cols, depth]`.
31018//	ksizes: The size of the sliding window for each dimension of `images`.
31019//	strides: How far the centers of two consecutive patches are in
31020// the images. Must be: `[1, stride_rows, stride_cols, 1]`.
31021//	rates: Must be: `[1, rate_rows, rate_cols, 1]`. This is the
31022// input stride, specifying how far two consecutive patch samples are in the
31023// input. Equivalent to extracting patches with
31024// `patch_sizes_eff = patch_sizes + (patch_sizes - 1) * (rates - 1)`, followed by
31025// subsampling them spatially by a factor of `rates`. This is equivalent to
31026// `rate` in dilated (a.k.a. Atrous) convolutions; see the worked example below.
31027//	padding: The type of padding algorithm to use.
31028//
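// For example (illustrative numbers): with `ksizes = [1, 3, 3, 1]` and
// `rates = [1, 2, 2, 1]`, the effective patch size is 3 + (3 - 1) * (2 - 1) = 5,
// i.e. a 5x5 region sampled at every other pixel.
//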
31029// Returns 4-D Tensor with shape `[batch, out_rows, out_cols, ksize_rows *
31030// ksize_cols * depth]` containing image patches with size
31031// `ksize_rows x ksize_cols x depth` vectorized in the "depth" dimension. Note
31032// `out_rows` and `out_cols` are the dimensions of the output patches.
31033func ExtractImagePatches(scope *Scope, images tf.Output, ksizes []int64, strides []int64, rates []int64, padding string) (patches tf.Output) {
31034	if scope.Err() != nil {
31035		return
31036	}
31037	attrs := map[string]interface{}{"ksizes": ksizes, "strides": strides, "rates": rates, "padding": padding}
31038	opspec := tf.OpSpec{
31039		Type: "ExtractImagePatches",
31040		Input: []tf.Input{
31041			images,
31042		},
31043		Attrs: attrs,
31044	}
31045	op := scope.AddOperation(opspec)
31046	return op.Output(0)
31047}
31048
31049// Forwards the value of an available tensor from `inputs` to `output`.
31050//
31051// `Merge` waits for at least one of the tensors in `inputs` to become available.
31052// It is usually combined with `Switch` to implement branching.
31053//
31054// `Merge` forwards the first tensor to become available to `output`, and sets
31055// `value_index` to its index in `inputs`.
31056//
31057// Arguments:
31058//	inputs: The input tensors, exactly one of which will become available.
31059//
31060// Returns:
31061//	output: Will be set to the available input tensor.
31062//	value_index: The index of the chosen input tensor in `inputs`.
31063func Merge(scope *Scope, inputs []tf.Output) (output tf.Output, value_index tf.Output) {
31064	if scope.Err() != nil {
31065		return
31066	}
31067	opspec := tf.OpSpec{
31068		Type: "Merge",
31069		Input: []tf.Input{
31070			tf.OutputList(inputs),
31071		},
31072	}
31073	op := scope.AddOperation(opspec)
31074	return op.Output(0), op.Output(1)
31075}
31076
31077// PaddedBatchDatasetV2Attr is an optional argument to PaddedBatchDatasetV2.
31078type PaddedBatchDatasetV2Attr func(optionalAttr)
31079
31080// PaddedBatchDatasetV2ParallelCopy sets the optional parallel_copy attribute to value.
31081// If not specified, defaults to false
31082func PaddedBatchDatasetV2ParallelCopy(value bool) PaddedBatchDatasetV2Attr {
31083	return func(m optionalAttr) {
31084		m["parallel_copy"] = value
31085	}
31086}
31087
31088// Creates a dataset that batches and pads `batch_size` elements from the input.
31089//
31090// Arguments:
31091//
31092//	batch_size: A scalar representing the number of elements to accumulate in a
31093// batch.
31094//	padded_shapes: A list of int64 tensors representing the desired padded shapes
31095// of the corresponding output components. These shapes may be partially
31096// specified, using `-1` to indicate that a particular dimension should be
31097// padded to the maximum size of all batch elements.
31098//	padding_values: A list of scalars containing the padding value to use for
31099// each of the outputs.
31100//	drop_remainder: A scalar representing whether the last batch should be dropped in case its size
31101// is smaller than desired.
31102//
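// For example (an illustrative sketch of the padding semantics): if a batch
// contains components of shapes `[3]` and `[5]` and the corresponding entry
// of `padded_shapes` is `[-1]`, both components are padded to length 5 using
// the matching entry of `padding_values`.
//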
31103func PaddedBatchDatasetV2(scope *Scope, input_dataset tf.Output, batch_size tf.Output, padded_shapes []tf.Output, padding_values []tf.Output, drop_remainder tf.Output, output_shapes []tf.Shape, optional ...PaddedBatchDatasetV2Attr) (handle tf.Output) {
31104	if scope.Err() != nil {
31105		return
31106	}
31107	attrs := map[string]interface{}{"output_shapes": output_shapes}
31108	for _, a := range optional {
31109		a(attrs)
31110	}
31111	opspec := tf.OpSpec{
31112		Type: "PaddedBatchDatasetV2",
31113		Input: []tf.Input{
31114			input_dataset, batch_size, tf.OutputList(padded_shapes), tf.OutputList(padding_values), drop_remainder,
31115		},
31116		Attrs: attrs,
31117	}
31118	op := scope.AddOperation(opspec)
31119	return op.Output(0)
31120}
31121
31122// BlockLSTMV2Attr is an optional argument to BlockLSTMV2.
31123type BlockLSTMV2Attr func(optionalAttr)
31124
31125// BlockLSTMV2CellClip sets the optional cell_clip attribute to value.
31126//
31127// value: Value to clip the 'cs' value to.
31128// If not specified, defaults to 0
31129func BlockLSTMV2CellClip(value float32) BlockLSTMV2Attr {
31130	return func(m optionalAttr) {
31131		m["cell_clip"] = value
31132	}
31133}
31134
31135// BlockLSTMV2UsePeephole sets the optional use_peephole attribute to value.
31136//
31137// value: Whether to use peephole weights.
31138// If not specified, defaults to false
31139func BlockLSTMV2UsePeephole(value bool) BlockLSTMV2Attr {
31140	return func(m optionalAttr) {
31141		m["use_peephole"] = value
31142	}
31143}
31144
31145// Computes the LSTM cell forward propagation for all the time steps.
31146//
31147// This is equivalent to applying LSTMBlockCell in a loop, like so:
31148//
31149// ```python
31150// for x1 in unpack(x):
31151//   i1, cs1, f1, o1, ci1, co1, h1 = LSTMBlock(
31152//     x1, cs_prev, h_prev, w, wci, wcf, wco, b)
31153//   cs_prev = cs1
31154//   h_prev = h1
31155//   i.append(i1)
31156//   cs.append(cs1)
31157//   f.append(f1)
31158//   o.append(o1)
31159//   ci.append(ci1)
31160//   co.append(co1)
31161//   h.append(h1)
31162// return pack(i), pack(cs), pack(f), pack(o), pack(ci), pack(co), pack(h)
31163// ```
31164//
31165// Note that unlike LSTMBlockCell (and BlockLSTM) which use the ICFO gate layout,
31166// this op uses IFCO. So in order for the above snippet to be equivalent,
31167// all gate-related outputs should be reordered.
31168//
31169// Arguments:
31170//	seq_len_max: Maximum time length actually used by this input. Outputs are padded
31171// with zeros beyond this length.
31172//	x: The sequence input to the LSTM, shape (timelen, batch_size, num_inputs).
31173//	cs_prev: Value of the initial cell state.
31174//	h_prev: Initial output of cell (to be used for peephole).
31175//	w: The weight matrix.
31176//	wci: The weight matrix for input gate peephole connection.
31177//	wcf: The weight matrix for forget gate peephole connection.
31178//	wco: The weight matrix for output gate peephole connection.
31179//	b: The bias vector.
31180//
31181// Returns:
31182//	i: The input gate over the whole time sequence.
31183//	cs: The cell state before the tanh over the whole time sequence.
31184//	f: The forget gate over the whole time sequence.
31185//	o: The output gate over the whole time sequence.
31186//	ci: The cell input over the whole time sequence.
31187//	co: The cell after the tanh over the whole time sequence.
31188//	h: The output h vector over the whole time sequence.
31189func BlockLSTMV2(scope *Scope, seq_len_max tf.Output, x tf.Output, cs_prev tf.Output, h_prev tf.Output, w tf.Output, wci tf.Output, wcf tf.Output, wco tf.Output, b tf.Output, optional ...BlockLSTMV2Attr) (i tf.Output, cs tf.Output, f tf.Output, o tf.Output, ci tf.Output, co tf.Output, h tf.Output) {
31190	if scope.Err() != nil {
31191		return
31192	}
31193	attrs := map[string]interface{}{}
31194	for _, a := range optional {
31195		a(attrs)
31196	}
31197	opspec := tf.OpSpec{
31198		Type: "BlockLSTMV2",
31199		Input: []tf.Input{
31200			seq_len_max, x, cs_prev, h_prev, w, wci, wcf, wco, b,
31201		},
31202		Attrs: attrs,
31203	}
31204	op := scope.AddOperation(opspec)
31205	return op.Output(0), op.Output(1), op.Output(2), op.Output(3), op.Output(4), op.Output(5), op.Output(6)
31206}
31207
31208// Return a tensor with the same shape and contents as the input tensor or value.
31209func Identity(scope *Scope, input tf.Output) (output tf.Output) {
31210	if scope.Err() != nil {
31211		return
31212	}
31213	opspec := tf.OpSpec{
31214		Type: "Identity",
31215		Input: []tf.Input{
31216			input,
31217		},
31218	}
31219	op := scope.AddOperation(opspec)
31220	return op.Output(0)
31221}
31222
31223// Outputs a `Summary` protocol buffer with scalar values.
31224//
31225// The input `tags` and `values` must have the same shape.  The generated summary
31226// has a summary value for each tag-value pair in `tags` and `values`.
31227//
31228// Arguments:
31229//	tags: Tags for the summary.
31230//	values: Same shape as `tags`.  Values for the summary.
31231//
31232// Returns Scalar.  Serialized `Summary` protocol buffer.
31233func ScalarSummary(scope *Scope, tags tf.Output, values tf.Output) (summary tf.Output) {
31234	if scope.Err() != nil {
31235		return
31236	}
31237	opspec := tf.OpSpec{
31238		Type: "ScalarSummary",
31239		Input: []tf.Input{
31240			tags, values,
31241		},
31242	}
31243	op := scope.AddOperation(opspec)
31244	return op.Output(0)
31245}
31246
31247// ResourceSparseApplyProximalAdagradAttr is an optional argument to ResourceSparseApplyProximalAdagrad.
31248type ResourceSparseApplyProximalAdagradAttr func(optionalAttr)
31249
31250// ResourceSparseApplyProximalAdagradUseLocking sets the optional use_locking attribute to value.
31251//
31252// value: If True, updating of the var and accum tensors will be protected by
31253// a lock; otherwise the behavior is undefined, but may exhibit less contention.
31254// If not specified, defaults to false
31255func ResourceSparseApplyProximalAdagradUseLocking(value bool) ResourceSparseApplyProximalAdagradAttr {
31256	return func(m optionalAttr) {
31257		m["use_locking"] = value
31258	}
31259}
31260
31261// Sparse update entries in '*var' and '*accum' according to FOBOS algorithm.
31262//
31263// That is, for the rows for which we have grad, we update var and accum as follows:
31264// accum += grad * grad
31265// prox_v = var
31266// prox_v -= lr * grad * (1 / sqrt(accum))
31267// var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0}
31268//
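// For example (one illustrative scalar step of the update above): with
// var = 1.0, accum = 1.0, lr = 0.1, l1 = 0.0, l2 = 0.0 and grad = 1.0,
// accum becomes 2.0, prox_v = 1.0 - 0.1 * 1.0 / sqrt(2.0) ~= 0.93, and since
// l1 = l2 = 0, var = prox_v ~= 0.93.
//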
31269// Arguments:
31270//	var_: Should be from a Variable().
31271//	accum: Should be from a Variable().
31272//	lr: Learning rate. Must be a scalar.
31273//	l1: L1 regularization. Must be a scalar.
31274//	l2: L2 regularization. Must be a scalar.
31275//	grad: The gradient.
31276//	indices: A vector of indices into the first dimension of var and accum.
31277//
31278// Returns the created operation.
31279func ResourceSparseApplyProximalAdagrad(scope *Scope, var_ tf.Output, accum tf.Output, lr tf.Output, l1 tf.Output, l2 tf.Output, grad tf.Output, indices tf.Output, optional ...ResourceSparseApplyProximalAdagradAttr) (o *tf.Operation) {
31280	if scope.Err() != nil {
31281		return
31282	}
31283	attrs := map[string]interface{}{}
31284	for _, a := range optional {
31285		a(attrs)
31286	}
31287	opspec := tf.OpSpec{
31288		Type: "ResourceSparseApplyProximalAdagrad",
31289		Input: []tf.Input{
31290			var_, accum, lr, l1, l2, grad, indices,
31291		},
31292		Attrs: attrs,
31293	}
31294	return scope.AddOperation(opspec)
31295}
31296
31297// Computes numerical negative value element-wise.
31298//
31299// I.e., \\(y = -x\\).
31300func Neg(scope *Scope, x tf.Output) (y tf.Output) {
31301	if scope.Err() != nil {
31302		return
31303	}
31304	opspec := tf.OpSpec{
31305		Type: "Neg",
31306		Input: []tf.Input{
31307			x,
31308		},
31309	}
31310	op := scope.AddOperation(opspec)
31311	return op.Output(0)
31312}
31313
31314// Concatenates tensors along one dimension.
31315//
31316// Arguments:
31317//	values: List of `N` Tensors to concatenate. Their ranks and types must match,
31318// and their sizes must match in all dimensions except `concat_dim`.
31319//	axis: 0-D.  The dimension along which to concatenate.  Must be in the
31320// range [-rank(values), rank(values)).
31321//
31322// Returns A `Tensor` with the concatenation of values stacked along the
31323// `concat_dim` dimension.  This tensor's shape matches that of `values` except
31324// in `concat_dim` where it has the sum of the sizes.
31325func ConcatV2(scope *Scope, values []tf.Output, axis tf.Output) (output tf.Output) {
31326	if scope.Err() != nil {
31327		return
31328	}
31329	opspec := tf.OpSpec{
31330		Type: "ConcatV2",
31331		Input: []tf.Input{
31332			tf.OutputList(values), axis,
31333		},
31334	}
31335	op := scope.AddOperation(opspec)
31336	return op.Output(0)
31337}
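
// A minimal usage sketch (illustrative values):
//
//	s := op.NewScope()
//	a := op.Const(s, [][]int32{{1, 2}})
//	b := op.Const(s, [][]int32{{3, 4}})
//	axis := op.Const(s, int32(0))
//	out := op.ConcatV2(s, []tf.Output{a, b}, axis) // [[1, 2], [3, 4]]
//	_ = out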
31338
31339// Elementwise computes the bitwise right-shift of `x` and `y`.
31340//
31341// Performs a logical shift for unsigned integer types, and an arithmetic shift
31342// for signed integer types.
31343//
31344// If `y` is negative, or greater than or equal to the width of `x` in bits,
31345// the result is implementation defined.
31346//
31347// Example:
31348//
31349// ```python
31350// import tensorflow as tf
31351// from tensorflow.python.ops import bitwise_ops
31352// import numpy as np
31353// dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64]
31354//
31355// for dtype in dtype_list:
31356//   lhs = tf.constant([-1, -5, -3, -14], dtype=dtype)
31357//   rhs = tf.constant([5, 0, 7, 11], dtype=dtype)
31358//
31359//   right_shift_result = bitwise_ops.right_shift(lhs, rhs)
31360//
31361//   print(right_shift_result)
31362//
31363// # This will print:
31364// # tf.Tensor([-1 -5 -1 -1], shape=(4,), dtype=int8)
31365// # tf.Tensor([-1 -5 -1 -1], shape=(4,), dtype=int16)
31366// # tf.Tensor([-1 -5 -1 -1], shape=(4,), dtype=int32)
31367// # tf.Tensor([-1 -5 -1 -1], shape=(4,), dtype=int64)
31368//
31369// lhs = np.array([-2, 64, 101, 32], dtype=np.int8)
31370// rhs = np.array([-1, -5, -3, -14], dtype=np.int8)
31371// bitwise_ops.right_shift(lhs, rhs)
31372// # <tf.Tensor: shape=(4,), dtype=int8, numpy=array([ -2,  64, 101,  32], dtype=int8)>
31373// ```
31374//
31375func RightShift(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
31376	if scope.Err() != nil {
31377		return
31378	}
31379	opspec := tf.OpSpec{
31380		Type: "RightShift",
31381		Input: []tf.Input{
31382			x, y,
31383		},
31384	}
31385	op := scope.AddOperation(opspec)
31386	return op.Output(0)
31387}
31388
31389// IteratorFromStringHandleAttr is an optional argument to IteratorFromStringHandle.
31390type IteratorFromStringHandleAttr func(optionalAttr)
31391
31392// IteratorFromStringHandleOutputTypes sets the optional output_types attribute to value.
31393//
31394// value: If specified, defines the type of each tuple component in an
31395// element produced by the resulting iterator.
31396// If not specified, defaults to <>
31397//
31398// REQUIRES: len(value) >= 0
31399func IteratorFromStringHandleOutputTypes(value []tf.DataType) IteratorFromStringHandleAttr {
31400	return func(m optionalAttr) {
31401		m["output_types"] = value
31402	}
31403}
31404
31405// IteratorFromStringHandleOutputShapes sets the optional output_shapes attribute to value.
31406//
31407// value: If specified, defines the shape of each tuple component in an
31408// element produced by the resulting iterator.
31409// If not specified, defaults to <>
31410//
31411// REQUIRES: len(value) >= 0
31412func IteratorFromStringHandleOutputShapes(value []tf.Shape) IteratorFromStringHandleAttr {
31413	return func(m optionalAttr) {
31414		m["output_shapes"] = value
31415	}
31416}
31417
31418// Converts the given string representing a handle to an iterator to a resource.
31419//
31420// Arguments:
31421//	string_handle: A string representation of the given handle.
31422//
31423// Returns A handle to an iterator resource.
31424func IteratorFromStringHandle(scope *Scope, string_handle tf.Output, optional ...IteratorFromStringHandleAttr) (resource_handle tf.Output) {
31425	if scope.Err() != nil {
31426		return
31427	}
31428	attrs := map[string]interface{}{}
31429	for _, a := range optional {
31430		a(attrs)
31431	}
31432	opspec := tf.OpSpec{
31433		Type: "IteratorFromStringHandle",
31434		Input: []tf.Input{
31435			string_handle,
31436		},
31437		Attrs: attrs,
31438	}
31439	op := scope.AddOperation(opspec)
31440	return op.Output(0)
31441}
31442
31443// WriteAudioSummaryAttr is an optional argument to WriteAudioSummary.
31444type WriteAudioSummaryAttr func(optionalAttr)
31445
31446// WriteAudioSummaryMaxOutputs sets the optional max_outputs attribute to value.
31447// If not specified, defaults to 3
31448//
31449// REQUIRES: value >= 1
31450func WriteAudioSummaryMaxOutputs(value int64) WriteAudioSummaryAttr {
31451	return func(m optionalAttr) {
31452		m["max_outputs"] = value
31453	}
31454}
31455
31456// Writes an audio summary.
31457//
31458// Writes encoded audio summary `tensor` at `step` with `tag` using summary `writer`.
31459// `sample_rate` is the audio sample rate in Hz.
31460//
31461// Returns the created operation.
31462func WriteAudioSummary(scope *Scope, writer tf.Output, step tf.Output, tag tf.Output, tensor tf.Output, sample_rate tf.Output, optional ...WriteAudioSummaryAttr) (o *tf.Operation) {
31463	if scope.Err() != nil {
31464		return
31465	}
31466	attrs := map[string]interface{}{}
31467	for _, a := range optional {
31468		a(attrs)
31469	}
31470	opspec := tf.OpSpec{
31471		Type: "WriteAudioSummary",
31472		Input: []tf.Input{
31473			writer, step, tag, tensor, sample_rate,
31474		},
31475		Attrs: attrs,
31476	}
31477	return scope.AddOperation(opspec)
31478}
31479
31480// Performs gradient updates of embedding tables.
31481//
31482// Arguments:
31483//	inputs: A TensorList of gradients with which to update embedding tables.
31484// This argument has the same length and shapes as the return value of
31485// RecvTPUEmbeddingActivations, but contains gradients of the model's loss
31486// with respect to the embedding activations. The embedding tables are updated
31487// from these gradients via the optimizer specified in the TPU embedding
31488// configuration given to tpu.initialize_system.
31489//	learning_rates: A TensorList of float32 scalars, one for each dynamic learning
31490// rate tag: see the comments in
31491// //third_party/tensorflow/core/protobuf/tpu/optimization_parameters.proto.
31492// Multiple tables can share the same dynamic learning rate tag as specified
31493// in the configuration. If the learning rates for all tables are constant,
31494// this list should be empty.
31495//	config: Serialized TPUEmbeddingConfiguration proto.
31496//
31497// Returns the created operation.
31498func SendTPUEmbeddingGradients(scope *Scope, inputs []tf.Output, learning_rates []tf.Output, config string) (o *tf.Operation) {
31499	if scope.Err() != nil {
31500		return
31501	}
31502	attrs := map[string]interface{}{"config": config}
31503	opspec := tf.OpSpec{
31504		Type: "SendTPUEmbeddingGradients",
31505		Input: []tf.Input{
31506			tf.OutputList(inputs), tf.OutputList(learning_rates),
31507		},
31508		Attrs: attrs,
31509	}
31510	return scope.AddOperation(opspec)
31511}
31512
31513// CumsumAttr is an optional argument to Cumsum.
31514type CumsumAttr func(optionalAttr)
31515
31516// CumsumExclusive sets the optional exclusive attribute to value.
31517//
31518// value: If `True`, perform exclusive cumsum.
31519// If not specified, defaults to false
31520func CumsumExclusive(value bool) CumsumAttr {
31521	return func(m optionalAttr) {
31522		m["exclusive"] = value
31523	}
31524}
31525
31526// CumsumReverse sets the optional reverse attribute to value.
31527//
31528// value: A `bool` (default: False).
31529// If not specified, defaults to false
31530func CumsumReverse(value bool) CumsumAttr {
31531	return func(m optionalAttr) {
31532		m["reverse"] = value
31533	}
31534}
31535
31536// Compute the cumulative sum of the tensor `x` along `axis`.
31537//
31538// By default, this op performs an inclusive cumsum, which means that the first
31539// element of the input is identical to the first element of the output:
31540//
31541// ```python
31542// tf.cumsum([a, b, c])  # => [a, a + b, a + b + c]
31543// ```
31544//
31545// By setting the `exclusive` kwarg to `True`, an exclusive cumsum is
31546// performed instead:
31547//
31548// ```python
31549// tf.cumsum([a, b, c], exclusive=True)  # => [0, a, a + b]
31550// ```
31551//
31552// By setting the `reverse` kwarg to `True`, the cumsum is performed in the
31553// opposite direction:
31554//
31555// ```python
31556// tf.cumsum([a, b, c], reverse=True)  # => [a + b + c, b + c, c]
31557// ```
31558//
31559// This is more efficient than using separate `tf.reverse` ops.
31560//
31561// The `reverse` and `exclusive` kwargs can also be combined:
31562//
31563// ```python
31564// tf.cumsum([a, b, c], exclusive=True, reverse=True)  # => [b + c, c, 0]
31565// ```
31566//
31567// Arguments:
31568//	x: A `Tensor`. Must be one of the following types: `float32`, `float64`,
31569// `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
31570// `complex128`, `qint8`, `quint8`, `qint32`, `half`.
31571//	axis: A `Tensor` of type `int32` (default: 0). Must be in the range
31572// `[-rank(x), rank(x))`.
31573func Cumsum(scope *Scope, x tf.Output, axis tf.Output, optional ...CumsumAttr) (out tf.Output) {
31574	if scope.Err() != nil {
31575		return
31576	}
31577	attrs := map[string]interface{}{}
31578	for _, a := range optional {
31579		a(attrs)
31580	}
31581	opspec := tf.OpSpec{
31582		Type: "Cumsum",
31583		Input: []tf.Input{
31584			x, axis,
31585		},
31586		Attrs: attrs,
31587	}
31588	op := scope.AddOperation(opspec)
31589	return op.Output(0)
31590}
31591
31592// Encode audio data using the WAV file format.
31593//
31594// This operation will generate a string suitable to be saved out to create a .wav
31595// audio file. It will be encoded in the 16-bit PCM format. It takes in float
31596// values in the range -1.0f to 1.0f, and any values outside that range will be
31597// clamped to it.
31598//
31599// `audio` is a 2-D float Tensor of shape `[length, channels]`.
31600// `sample_rate` is a scalar Tensor holding the rate to use (e.g. 44100).
31601//
31602// Arguments:
31603//	audio: 2-D with shape `[length, channels]`.
31604//	sample_rate: Scalar containing the sample frequency.
31605//
31606// Returns 0-D. WAV-encoded file contents.
31607func EncodeWav(scope *Scope, audio tf.Output, sample_rate tf.Output) (contents tf.Output) {
31608	if scope.Err() != nil {
31609		return
31610	}
31611	opspec := tf.OpSpec{
31612		Type: "EncodeWav",
31613		Input: []tf.Input{
31614			audio, sample_rate,
31615		},
31616	}
31617	op := scope.AddOperation(opspec)
31618	return op.Output(0)
31619}
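
// A minimal usage sketch (illustrative audio values; a real program would run
// the graph in a tf.Session and write the resulting scalar string to a .wav
// file):
//
//	s := op.NewScope()
//	audio := op.Const(s, [][]float32{{0.0}, {0.5}, {-0.5}}) // [length, channels]
//	rate := op.Const(s, int32(44100))
//	contents := op.EncodeWav(s, audio, rate)
//	_ = contents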
31620
31621// EuclideanNormAttr is an optional argument to EuclideanNorm.
31622type EuclideanNormAttr func(optionalAttr)
31623
31624// EuclideanNormKeepDims sets the optional keep_dims attribute to value.
31625//
31626// value: If true, retain reduced dimensions with length 1.
31627// If not specified, defaults to false
31628func EuclideanNormKeepDims(value bool) EuclideanNormAttr {
31629	return func(m optionalAttr) {
31630		m["keep_dims"] = value
31631	}
31632}
31633
31634// Computes the Euclidean norm of elements across dimensions of a tensor.
31635//
31636// Reduces `input` along the dimensions given in `axis`. Unless
31637// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
31638// `axis`. If `keep_dims` is true, the reduced dimensions are
31639// retained with length 1.
31640//
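// For example, reducing `[3.0, 4.0]` along axis 0 yields
// sqrt(3^2 + 4^2) = 5.0.
//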
31641// Arguments:
31642//	input: The tensor to reduce.
31643//	axis: The dimensions to reduce. Must be in the range
31644// `[-rank(input), rank(input))`.
31645//
31646// Returns The reduced tensor.
31647func EuclideanNorm(scope *Scope, input tf.Output, axis tf.Output, optional ...EuclideanNormAttr) (output tf.Output) {
31648	if scope.Err() != nil {
31649		return
31650	}
31651	attrs := map[string]interface{}{}
31652	for _, a := range optional {
31653		a(attrs)
31654	}
31655	opspec := tf.OpSpec{
31656		Type: "EuclideanNorm",
31657		Input: []tf.Input{
31658			input, axis,
31659		},
31660		Attrs: attrs,
31661	}
31662	op := scope.AddOperation(opspec)
31663	return op.Output(0)
31664}
31665
31666// IRFFT3DAttr is an optional argument to IRFFT3D.
31667type IRFFT3DAttr func(optionalAttr)
31668
31669// IRFFT3DTreal sets the optional Treal attribute to value.
31670// If not specified, defaults to DT_FLOAT
31671func IRFFT3DTreal(value tf.DataType) IRFFT3DAttr {
31672	return func(m optionalAttr) {
31673		m["Treal"] = value
31674	}
31675}
31676
31677// Inverse 3D real-valued fast Fourier transform.
31678//
31679// Computes the inverse 3-dimensional discrete Fourier transform of a real-valued
31680// signal over the inner-most 3 dimensions of `input`.
31681//
31682// The inner-most 3 dimensions of `input` are assumed to be the result of `RFFT3D`:
31683// The inner-most dimension contains the `fft_length / 2 + 1` unique components of
31684// the DFT of a real-valued signal. If `fft_length` is not provided, it is computed
31685// from the size of the inner-most 3 dimensions of `input`. If the FFT length used
31686// to compute `input` is odd, it should be provided since it cannot be inferred
31687// properly.
31688//
31689// Along each axis `IRFFT3D` is computed on, if `fft_length` (or
31690// `fft_length / 2 + 1` for the inner-most dimension) is smaller than the
31691// corresponding dimension of `input`, the dimension is cropped. If it is larger,
31692// the dimension is padded with zeros.
31693//
31694// Arguments:
31695//	input: A complex tensor.
31696//	fft_length: An int32 tensor of shape [3]. The FFT length for each dimension.
31697//
31698// Returns A float32 tensor of the same rank as `input`. The inner-most 3
31699//   dimensions of `input` are replaced with the `fft_length` samples of their
31700//   inverse 3D real Fourier transform.
31701//
31702// @compatibility(numpy)
31703// Equivalent to np.irfftn with 3 dimensions.
31704// @end_compatibility
31705func IRFFT3D(scope *Scope, input tf.Output, fft_length tf.Output, optional ...IRFFT3DAttr) (output tf.Output) {
31706	if scope.Err() != nil {
31707		return
31708	}
31709	attrs := map[string]interface{}{}
31710	for _, a := range optional {
31711		a(attrs)
31712	}
31713	opspec := tf.OpSpec{
31714		Type: "IRFFT3D",
31715		Input: []tf.Input{
31716			input, fft_length,
31717		},
31718		Attrs: attrs,
31719	}
31720	op := scope.AddOperation(opspec)
31721	return op.Output(0)
31722}
31723
31724// Returns the element-wise min of two SparseTensors.
31725//
31726// Assumes the two SparseTensors have the same shape, i.e., no broadcasting.
31727//
31728// Arguments:
31729//	a_indices: 2-D.  `N x R` matrix with the indices of non-empty values in a
31730// SparseTensor, in the canonical lexicographic ordering.
31731//	a_values: 1-D.  `N` non-empty values corresponding to `a_indices`.
31732//	a_shape: 1-D.  Shape of the input SparseTensor.
31733//	b_indices: counterpart to `a_indices` for the other operand.
31734//	b_values: counterpart to `a_values` for the other operand; must be of the same dtype.
31735//	b_shape: counterpart to `a_shape` for the other operand; the two shapes must be equal.
31736//
31737// Returns:
31738//	output_indices: 2-D.  The indices of the output SparseTensor.
31739//	output_values: 1-D.  The values of the output SparseTensor.
31740func SparseSparseMinimum(scope *Scope, a_indices tf.Output, a_values tf.Output, a_shape tf.Output, b_indices tf.Output, b_values tf.Output, b_shape tf.Output) (output_indices tf.Output, output_values tf.Output) {
31741	if scope.Err() != nil {
31742		return
31743	}
31744	opspec := tf.OpSpec{
31745		Type: "SparseSparseMinimum",
31746		Input: []tf.Input{
31747			a_indices, a_values, a_shape, b_indices, b_values, b_shape,
31748		},
31749	}
31750	op := scope.AddOperation(opspec)
31751	return op.Output(0), op.Output(1)
31752}
31753
31754// StatefulStandardNormalV2Attr is an optional argument to StatefulStandardNormalV2.
31755type StatefulStandardNormalV2Attr func(optionalAttr)
31756
31757// StatefulStandardNormalV2Dtype sets the optional dtype attribute to value.
31758//
31759// value: The type of the output.
31760// If not specified, defaults to DT_FLOAT
31761func StatefulStandardNormalV2Dtype(value tf.DataType) StatefulStandardNormalV2Attr {
31762	return func(m optionalAttr) {
31763		m["dtype"] = value
31764	}
31765}
31766
31767// Outputs random values from a normal distribution.
31768//
31769// The generated values will have mean 0 and standard deviation 1.
31770//
31771// Arguments:
31772//	resource: The handle of the resource variable that stores the state of the RNG.
31773//	algorithm: The RNG algorithm.
31774//	shape: The shape of the output tensor.
31775//
31776// Returns A tensor of the specified shape filled with random normal values.
31777func StatefulStandardNormalV2(scope *Scope, resource tf.Output, algorithm tf.Output, shape tf.Output, optional ...StatefulStandardNormalV2Attr) (output tf.Output) {
31778	if scope.Err() != nil {
31779		return
31780	}
31781	attrs := map[string]interface{}{}
31782	for _, a := range optional {
31783		a(attrs)
31784	}
31785	opspec := tf.OpSpec{
31786		Type: "StatefulStandardNormalV2",
31787		Input: []tf.Input{
31788			resource, algorithm, shape,
31789		},
31790		Attrs: attrs,
31791	}
31792	op := scope.AddOperation(opspec)
31793	return op.Output(0)
31794}
31795
31796// Helper used to compute the gradient for `RaggedTensorToVariant`.
31797//
31798// Computes the gradient for the dense_values input to the RaggedTensorToVariant
31799// op, given the variant-encoded ragged gradients of the outputs, along with
31800// the outer row-splits and the shape of the dense-values that were provided as
31801// inputs to the RaggedTensorToVariant op.
31802//
31803// Arguments:
31804//	encoded_ragged_grad: A `variant` Tensor containing encoded `RaggedTensor` gradients.
31805//	row_splits: Outermost row-splits that were used as input to the RaggedTensorToVariant op.
31806//	dense_values_shape: Shape of the dense_values that was used as an input to the
31807// RaggedTensorToVariant op.
31808//
31809//
31810// Returns Gradient for the dense_values of the RaggedTensorToVariant op.
31811func RaggedTensorToVariantGradient(scope *Scope, encoded_ragged_grad tf.Output, row_splits tf.Output, dense_values_shape tf.Output, Tvalues tf.DataType) (dense_values_grad tf.Output) {
31812	if scope.Err() != nil {
31813		return
31814	}
31815	attrs := map[string]interface{}{"Tvalues": Tvalues}
31816	opspec := tf.OpSpec{
31817		Type: "RaggedTensorToVariantGradient",
31818		Input: []tf.Input{
31819			encoded_ragged_grad, row_splits, dense_values_shape,
31820		},
31821		Attrs: attrs,
31822	}
31823	op := scope.AddOperation(opspec)
31824	return op.Output(0)
31825}
31826
31827// ResourceSparseApplyFtrlAttr is an optional argument to ResourceSparseApplyFtrl.
31828type ResourceSparseApplyFtrlAttr func(optionalAttr)
31829
31830// ResourceSparseApplyFtrlUseLocking sets the optional use_locking attribute to value.
31831//
31832// value: If `True`, updating of the var and accum tensors will be protected
31833// by a lock; otherwise the behavior is undefined, but may exhibit less
31834// contention.
31835// If not specified, defaults to false
31836func ResourceSparseApplyFtrlUseLocking(value bool) ResourceSparseApplyFtrlAttr {
31837	return func(m optionalAttr) {
31838		m["use_locking"] = value
31839	}
31840}
31841
31842// ResourceSparseApplyFtrlMultiplyLinearByLr sets the optional multiply_linear_by_lr attribute to value.
31843// If not specified, defaults to false
31844func ResourceSparseApplyFtrlMultiplyLinearByLr(value bool) ResourceSparseApplyFtrlAttr {
31845	return func(m optionalAttr) {
31846		m["multiply_linear_by_lr"] = value
31847	}
31848}
31849
31850// Update relevant entries in '*var' according to the Ftrl-proximal scheme.
31851//
31852// That is, for the rows for which we have grad, we update var, accum and linear as follows:
31853// accum_new = accum + grad * grad
31854// linear += grad - (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
31855// quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
31856// var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
31857// accum = accum_new
31858//
31859// Arguments:
31860//	var_: Should be from a Variable().
31861//	accum: Should be from a Variable().
31862//	linear: Should be from a Variable().
31863//	grad: The gradient.
31864//	indices: A vector of indices into the first dimension of var and accum.
31865//	lr: Scaling factor. Must be a scalar.
31866//	l1: L1 regularization. Must be a scalar.
31867//	l2: L2 regularization. Must be a scalar.
31868//	lr_power: Scaling factor. Must be a scalar.
31869//
31870// Returns the created operation.
31871func ResourceSparseApplyFtrl(scope *Scope, var_ tf.Output, accum tf.Output, linear tf.Output, grad tf.Output, indices tf.Output, lr tf.Output, l1 tf.Output, l2 tf.Output, lr_power tf.Output, optional ...ResourceSparseApplyFtrlAttr) (o *tf.Operation) {
31872	if scope.Err() != nil {
31873		return
31874	}
31875	attrs := map[string]interface{}{}
31876	for _, a := range optional {
31877		a(attrs)
31878	}
31879	opspec := tf.OpSpec{
31880		Type: "ResourceSparseApplyFtrl",
31881		Input: []tf.Input{
31882			var_, accum, linear, grad, indices, lr, l1, l2, lr_power,
31883		},
31884		Attrs: attrs,
31885	}
31886	return scope.AddOperation(opspec)
31887}
31888
31889// Debugging/model interpretability outputs for each example.
31890//
31891// It traverses all the trees and computes debug metrics for individual examples,
31892// such as getting split feature ids and logits after each split along the decision
31893// path used to compute directional feature contributions.
31894//
31895// Arguments:
31896//
31897//	bucketized_features: A list of rank 1 Tensors containing bucket id for each
31898// feature.
31899//	logits_dimension: scalar, dimension of the logits, to be used for constructing the protos in
31900// examples_debug_outputs_serialized.
31901//
31902// Returns Output rank 1 Tensor containing a proto serialized as a string for each example.
31903func BoostedTreesExampleDebugOutputs(scope *Scope, tree_ensemble_handle tf.Output, bucketized_features []tf.Output, logits_dimension int64) (examples_debug_outputs_serialized tf.Output) {
31904	if scope.Err() != nil {
31905		return
31906	}
31907	attrs := map[string]interface{}{"logits_dimension": logits_dimension}
31908	opspec := tf.OpSpec{
31909		Type: "BoostedTreesExampleDebugOutputs",
31910		Input: []tf.Input{
31911			tree_ensemble_handle, tf.OutputList(bucketized_features),
31912		},
31913		Attrs: attrs,
31914	}
31915	op := scope.AddOperation(opspec)
31916	return op.Output(0)
31917}
31918
31919// StatefulUniformAttr is an optional argument to StatefulUniform.
31920type StatefulUniformAttr func(optionalAttr)
31921
31922// StatefulUniformDtype sets the optional dtype attribute to value.
31923//
31924// value: The type of the output.
31925// If not specified, defaults to DT_FLOAT
31926func StatefulUniformDtype(value tf.DataType) StatefulUniformAttr {
31927	return func(m optionalAttr) {
31928		m["dtype"] = value
31929	}
31930}
31931
31932// Outputs random values from a uniform distribution.
31933//
31934// The generated values follow a uniform distribution in the range `[0, 1)`. The
31935// lower bound 0 is included in the range, while the upper bound 1 is excluded.
31936//
31937// Arguments:
31938//	resource: The handle of the resource variable that stores the state of the RNG.
31939//	algorithm: The RNG algorithm.
31940//	shape: The shape of the output tensor.
31941//
31942// Returns Random values with specified shape.
31943func StatefulUniform(scope *Scope, resource tf.Output, algorithm tf.Output, shape tf.Output, optional ...StatefulUniformAttr) (output tf.Output) {
31944	if scope.Err() != nil {
31945		return
31946	}
31947	attrs := map[string]interface{}{}
31948	for _, a := range optional {
31949		a(attrs)
31950	}
31951	opspec := tf.OpSpec{
31952		Type: "StatefulUniform",
31953		Input: []tf.Input{
31954			resource, algorithm, shape,
31955		},
31956		Attrs: attrs,
31957	}
31958	op := scope.AddOperation(opspec)
31959	return op.Output(0)
31960}
31961
31962// Computes rectified linear 6 gradients for a Relu6 operation.
31963//
31964// Arguments:
31965//	gradients: The backpropagated gradients to the corresponding Relu6 operation.
31966//	features: The features passed as input to the corresponding Relu6 operation, or
31967// its output; using either one produces the same result.
31968//
31969// Returns The gradients:
31970// `gradients * (features > 0) * (features < 6)`.
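//
// For example (illustrative values): with gradients = [1, 1, 1] and
// features = [-1, 3, 7], the result is [0, 1, 0].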
31971func Relu6Grad(scope *Scope, gradients tf.Output, features tf.Output) (backprops tf.Output) {
31972	if scope.Err() != nil {
31973		return
31974	}
31975	opspec := tf.OpSpec{
31976		Type: "Relu6Grad",
31977		Input: []tf.Input{
31978			gradients, features,
31979		},
31980	}
31981	op := scope.AddOperation(opspec)
31982	return op.Output(0)
31983}
31984
31985// EditDistanceAttr is an optional argument to EditDistance.
31986type EditDistanceAttr func(optionalAttr)
31987
31988// EditDistanceNormalize sets the optional normalize attribute to value.
31989//
31990// value: boolean (if true, edit distances are normalized by the
31991// length of truth).
31992//
31993// If not specified, defaults to true
31994func EditDistanceNormalize(value bool) EditDistanceAttr {
31995	return func(m optionalAttr) {
31996		m["normalize"] = value
31997	}
31998}
31999
32000// Computes the (possibly normalized) Levenshtein Edit Distance.
32001//
32002// The inputs are variable-length sequences provided by SparseTensors
32003//   (hypothesis_indices, hypothesis_values, hypothesis_shape)
32004// and
32005//   (truth_indices, truth_values, truth_shape).
32006//
32007// The inputs are:
32008//
32009// Arguments:
32010//	hypothesis_indices: The indices of the hypothesis list SparseTensor.
32011// This is an N x R int64 matrix.
32012//	hypothesis_values: The values of the hypothesis list SparseTensor.
32013// This is an N-length vector.
32014//	hypothesis_shape: The shape of the hypothesis list SparseTensor.
32015// This is an R-length vector.
32016//	truth_indices: The indices of the truth list SparseTensor.
32017// This is an M x R int64 matrix.
32018//	truth_values: The values of the truth list SparseTensor.
32019// This is an M-length vector.
32020//	truth_shape: The shape of the truth list SparseTensor. This is an R-length vector.
32021//
32022// Returns A dense float tensor with rank R - 1.
32023//
32024// For the example input:
32025//
32026//     // hypothesis represents a 2x1 matrix with variable-length values:
32027//     //   (0,0) = ["a"]
32028//     //   (1,0) = ["b"]
32029//     hypothesis_indices = [[0, 0, 0],
32030//                           [1, 0, 0]]
32031//     hypothesis_values = ["a", "b"]
32032//     hypothesis_shape = [2, 1, 1]
32033//
32034//     // truth represents a 2x2 matrix with variable-length values:
32035//     //   (0,0) = []
32036//     //   (0,1) = ["a"]
32037//     //   (1,0) = ["b", "c"]
32038//     //   (1,1) = ["a"]
32039//     truth_indices = [[0, 1, 0],
32040//                      [1, 0, 0],
32041//                      [1, 0, 1],
32042//                      [1, 1, 0]]
32043//     truth_values = ["a", "b", "c", "a"]
32044//     truth_shape = [2, 2, 2]
32045//     normalize = true
32046//
32047// The output will be:
32048//
32049//     // output is a 2x2 matrix with edit distances normalized by truth lengths.
32050//     output = [[inf, 1.0],  // (0,0): no truth, (0,1): no hypothesis
32051//               [0.5, 1.0]]  // (1,0): addition, (1,1): no hypothesis
32052func EditDistance(scope *Scope, hypothesis_indices tf.Output, hypothesis_values tf.Output, hypothesis_shape tf.Output, truth_indices tf.Output, truth_values tf.Output, truth_shape tf.Output, optional ...EditDistanceAttr) (output tf.Output) {
32053	if scope.Err() != nil {
32054		return
32055	}
32056	attrs := map[string]interface{}{}
32057	for _, a := range optional {
32058		a(attrs)
32059	}
32060	opspec := tf.OpSpec{
32061		Type: "EditDistance",
32062		Input: []tf.Input{
32063			hypothesis_indices, hypothesis_values, hypothesis_shape, truth_indices, truth_values, truth_shape,
32064		},
32065		Attrs: attrs,
32066	}
32067	op := scope.AddOperation(opspec)
32068	return op.Output(0)
32069}
32070
32071// Concatenates a list of `N` tensors along the first dimension.
32072//
32073// The input tensors are all required to have size 1 in the first dimension.
32074//
32075// For example:
32076//
32077// ```
32078// # 'x' is [[1, 4]]
32079// # 'y' is [[2, 5]]
32080// # 'z' is [[3, 6]]
32081// parallel_concat([x, y, z]) => [[1, 4], [2, 5], [3, 6]]  # Pack along first dim.
32082// ```
32083//
32084// The difference between concat and parallel_concat is that concat requires all
32085// of the inputs to be computed before the operation begins, but doesn't require
32086// that the input shapes be known during graph construction.  Parallel concat
32087// will copy pieces of the input into the output as they become available; in
32088// some situations this can provide a performance benefit.
32089//
32090// Arguments:
32091//	values: Tensors to be concatenated. All must have size 1 in the first dimension
32092// and same shape.
32093//	shape: the final shape of the result; should be equal to the shapes of any input
32094// but with the number of input values in the first dimension.
32095//
32096// Returns The concatenated tensor.
32097func ParallelConcat(scope *Scope, values []tf.Output, shape tf.Shape) (output tf.Output) {
32098	if scope.Err() != nil {
32099		return
32100	}
32101	attrs := map[string]interface{}{"shape": shape}
32102	opspec := tf.OpSpec{
32103		Type: "ParallelConcat",
32104		Input: []tf.Input{
32105			tf.OutputList(values),
32106		},
32107		Attrs: attrs,
32108	}
32109	op := scope.AddOperation(opspec)
32110	return op.Output(0)
32111}
32112
32113// AvgPoolGradAttr is an optional argument to AvgPoolGrad.
32114type AvgPoolGradAttr func(optionalAttr)
32115
32116// AvgPoolGradDataFormat sets the optional data_format attribute to value.
32117//
32118// value: Specify the data format of the input and output data. With the
32119// default format "NHWC", the data is stored in the order of:
32120//     [batch, in_height, in_width, in_channels].
32121// Alternatively, the format could be "NCHW", the data storage order of:
32122//     [batch, in_channels, in_height, in_width].
32123// If not specified, defaults to "NHWC"
32124func AvgPoolGradDataFormat(value string) AvgPoolGradAttr {
32125	return func(m optionalAttr) {
32126		m["data_format"] = value
32127	}
32128}
32129
32130// Computes gradients of the average pooling function.
32131//
32132// Arguments:
32133//	orig_input_shape: 1-D.  Shape of the original input to `avg_pool`.
32134//	grad: 4-D with shape `[batch, height, width, channels]`.  Gradients w.r.t.
32135// the output of `avg_pool`.
32136//	ksize: The size of the sliding window for each dimension of the input.
32137//	strides: The stride of the sliding window for each dimension of the input.
32138//	padding: The type of padding algorithm to use.
32139//
32140// Returns 4-D.  Gradients w.r.t. the input of `avg_pool`.
32141func AvgPoolGrad(scope *Scope, orig_input_shape tf.Output, grad tf.Output, ksize []int64, strides []int64, padding string, optional ...AvgPoolGradAttr) (output tf.Output) {
32142	if scope.Err() != nil {
32143		return
32144	}
32145	attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
32146	for _, a := range optional {
32147		a(attrs)
32148	}
32149	opspec := tf.OpSpec{
32150		Type: "AvgPoolGrad",
32151		Input: []tf.Input{
32152			orig_input_shape, grad,
32153		},
32154		Attrs: attrs,
32155	}
32156	op := scope.AddOperation(opspec)
32157	return op.Output(0)
32158}
32159
32160// StringSplitAttr is an optional argument to StringSplit.
32161type StringSplitAttr func(optionalAttr)
32162
32163// StringSplitSkipEmpty sets the optional skip_empty attribute to value.
32164//
32165// value: A `bool`. If `True`, skip the empty strings from the result.
32166// If not specified, defaults to true
32167func StringSplitSkipEmpty(value bool) StringSplitAttr {
32168	return func(m optionalAttr) {
32169		m["skip_empty"] = value
32170	}
32171}
32172
32173// Split elements of `input` based on `delimiter` into a `SparseTensor`.
32174//
32175// Let N be the size of source (typically N will be the batch size). Split each
32176// element of `input` based on `delimiter` and return a `SparseTensor`
32177// containing the split tokens. Empty tokens are ignored.
32178//
32179// `delimiter` can be empty, or a string of split characters. If `delimiter` is an
32180//  empty string, each element of `input` is split into individual single-byte
32181//  character strings, including splitting of UTF-8 multibyte sequences. Otherwise
32182//  every character of `delimiter` is a potential split point.
32183//
32184// For example:
32185//   N = 2, input[0] is 'hello world' and input[1] is 'a b c', then the output
32186//   will be
32187//
32188//   indices = [0, 0;
32189//              0, 1;
32190//              1, 0;
32191//              1, 1;
32192//              1, 2]
32193//   shape = [2, 3]
32194//   values = ['hello', 'world', 'a', 'b', 'c']
32195//
32196// Arguments:
32197//	input: 1-D. Strings to split.
32198//	delimiter: 0-D. Delimiter characters (bytes), or empty string.
32199//
32200// Returns:
32201//	indices: A dense matrix of int64 representing the indices of the sparse tensor.
32202//	values: A vector of strings corresponding to the split values.
32203//	shape: a length-2 vector of int64 representing the shape of the sparse
32204// tensor, where the first value is N and the second value is the maximum number
32205// of tokens in a single input entry.
32206func StringSplit(scope *Scope, input tf.Output, delimiter tf.Output, optional ...StringSplitAttr) (indices tf.Output, values tf.Output, shape tf.Output) {
32207	if scope.Err() != nil {
32208		return
32209	}
32210	attrs := map[string]interface{}{}
32211	for _, a := range optional {
32212		a(attrs)
32213	}
32214	opspec := tf.OpSpec{
32215		Type: "StringSplit",
32216		Input: []tf.Input{
32217			input, delimiter,
32218		},
32219		Attrs: attrs,
32220	}
32221	op := scope.AddOperation(opspec)
32222	return op.Output(0), op.Output(1), op.Output(2)
32223}
32224
32225// Assigns sparse updates to the variable referenced by `resource`.
32226//
32227// This operation computes
32228//
32229//     # Scalar indices
32230//     ref[indices, ...] = updates[...]
32231//
32232//     # Vector indices (for each i)
32233//     ref[indices[i], ...] = updates[i, ...]
32234//
32235//     # High rank indices (for each i, ..., j)
32236//     ref[indices[i, ..., j], ...] = updates[i, ..., j, ...]
32237//
32238// Arguments:
32239//	resource: Should be from a `Variable` node.
32240//	indices: A tensor of indices into the first dimension of `ref`.
//	updates: A tensor of updated values to store in `ref`.
32242//
32243// Returns the created operation.
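//
// A minimal usage sketch (illustrative only; it assumes a root Scope `s` and
// that the variable is initialized, e.g. via AssignVariableOp, before this op
// runs):
//
//	v := VarHandleOp(s, tf.Float, tf.MakeShape(4, 2))
//	indices := Const(s, []int32{0, 3})
//	updates := Const(s, [][]float32{{1, 1}, {2, 2}})
//	update := ResourceScatterUpdate(s, v, indices, updates)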
32244func ResourceScatterUpdate(scope *Scope, resource tf.Output, indices tf.Output, updates tf.Output) (o *tf.Operation) {
32245	if scope.Err() != nil {
32246		return
32247	}
32248	opspec := tf.OpSpec{
32249		Type: "ResourceScatterUpdate",
32250		Input: []tf.Input{
32251			resource, indices, updates,
32252		},
32253	}
32254	return scope.AddOperation(opspec)
32255}
32256
32257// Creates ngrams from ragged string data.
32258//
32259// This op accepts a ragged tensor with 1 ragged dimension containing only
32260// strings and outputs a ragged tensor with 1 ragged dimension containing ngrams
32261// of that string, joined along the innermost axis.
32262//
32263// Arguments:
32264//	data: The values tensor of the ragged string tensor to make ngrams out of. Must be a
32265// 1D string tensor.
32266//	data_splits: The splits tensor of the ragged string tensor to make ngrams out of.
32267//	separator: The string to append between elements of the token. Use "" for no separator.
32268//	ngram_widths: The sizes of the ngrams to create.
32269//	left_pad: The string to use to pad the left side of the ngram sequence. Only used if
32270// pad_width != 0.
32271//	right_pad: The string to use to pad the right side of the ngram sequence. Only used if
32272// pad_width != 0.
32273//	pad_width: The number of padding elements to add to each side of each
32274// sequence. Note that padding will never be greater than 'ngram_widths'-1
32275// regardless of this value. If `pad_width=-1`, then add `max(ngram_widths)-1`
32276// elements.
//	preserve_short_sequences: If true, a sequence shorter than the smallest
// requested ngram width (after padding) still emits a single ngram containing
// the entire sequence, instead of producing no ngrams.
//
32279// Returns:
32280//	ngrams: The values tensor of the output ngrams ragged tensor.
32281//	ngrams_splits: The splits tensor of the output ngrams ragged tensor.
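//
// A minimal usage sketch (illustrative only; it assumes a root Scope `s`). The
// two ragged rows here are ["a", "b"] and ["c", "d"], and bigrams are requested:
//
//	data := Const(s, []string{"a", "b", "c", "d"})
//	dataSplits := Const(s, []int64{0, 2, 4})
//	ngrams, ngramsSplits := StringNGrams(s, data, dataSplits, " ",
//		[]int64{2}, "", "", 0, false)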
32282func StringNGrams(scope *Scope, data tf.Output, data_splits tf.Output, separator string, ngram_widths []int64, left_pad string, right_pad string, pad_width int64, preserve_short_sequences bool) (ngrams tf.Output, ngrams_splits tf.Output) {
32283	if scope.Err() != nil {
32284		return
32285	}
32286	attrs := map[string]interface{}{"separator": separator, "ngram_widths": ngram_widths, "left_pad": left_pad, "right_pad": right_pad, "pad_width": pad_width, "preserve_short_sequences": preserve_short_sequences}
32287	opspec := tf.OpSpec{
32288		Type: "StringNGrams",
32289		Input: []tf.Input{
32290			data, data_splits,
32291		},
32292		Attrs: attrs,
32293	}
32294	op := scope.AddOperation(opspec)
32295	return op.Output(0), op.Output(1)
32296}
32297
32298// Reduces sparse updates into the variable referenced by `resource` using the `min` operation.
32299//
32300// This operation computes
32301//
32302//     # Scalar indices
32303//     ref[indices, ...] = min(ref[indices, ...], updates[...])
32304//
32305//     # Vector indices (for each i)
32306//     ref[indices[i], ...] = min(ref[indices[i], ...], updates[i, ...])
32307//
32308//     # High rank indices (for each i, ..., j)
32309//     ref[indices[i, ..., j], ...] = min(ref[indices[i, ..., j], ...], updates[i, ..., j, ...])
32310//
32311// Duplicate entries are handled correctly: if multiple `indices` reference
32312// the same location, their contributions are combined.
32313//
32314// Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.
32315//
32316// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
32317// <img style="width:100%" src='https://www.tensorflow.org/images/ScatterAdd.png' alt>
32318// </div>
32319//
32320// Arguments:
32321//	resource: Should be from a `Variable` node.
32322//	indices: A tensor of indices into the first dimension of `ref`.
//	updates: A tensor of updated values to reduce into `ref` with the `min` operation.
32324//
32325// Returns the created operation.
32326func ResourceScatterMin(scope *Scope, resource tf.Output, indices tf.Output, updates tf.Output) (o *tf.Operation) {
32327	if scope.Err() != nil {
32328		return
32329	}
32330	opspec := tf.OpSpec{
32331		Type: "ResourceScatterMin",
32332		Input: []tf.Input{
32333			resource, indices, updates,
32334		},
32335	}
32336	return scope.AddOperation(opspec)
32337}
32338
32339// Scatters tensor at indices in an input list.
32340//
32341// Each member of the TensorList corresponds to one row of the input tensor,
32342// specified by the given index (see `tf.gather`).
32343//
32344// input_handle: The list to scatter into.
32345// tensor: The input tensor.
32346// indices: The indices used to index into the list.
32347// output_handle: The TensorList.
32348func TensorListScatterIntoExistingList(scope *Scope, input_handle tf.Output, tensor tf.Output, indices tf.Output) (output_handle tf.Output) {
32349	if scope.Err() != nil {
32350		return
32351	}
32352	opspec := tf.OpSpec{
32353		Type: "TensorListScatterIntoExistingList",
32354		Input: []tf.Input{
32355			input_handle, tensor, indices,
32356		},
32357	}
32358	op := scope.AddOperation(opspec)
32359	return op.Output(0)
32360}
32361
32362// Outputs a `Summary` protocol buffer with a tensor and per-plugin data.
32363//
32364// Arguments:
32365//	tag: A string attached to this summary. Used for organization in TensorBoard.
32366//	tensor: A tensor to serialize.
32367//	serialized_summary_metadata: A serialized SummaryMetadata proto. Contains plugin
32368// data.
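//
// A minimal usage sketch (illustrative only; it assumes a root Scope `s`; the
// empty metadata string is a placeholder, not a meaningful serialized proto):
//
//	tag := Const(s, "loss")
//	value := Const(s, float32(0.25))
//	metadata := Const(s, "")
//	summary := TensorSummaryV2(s, tag, value, metadata)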
32369func TensorSummaryV2(scope *Scope, tag tf.Output, tensor tf.Output, serialized_summary_metadata tf.Output) (summary tf.Output) {
32370	if scope.Err() != nil {
32371		return
32372	}
32373	opspec := tf.OpSpec{
32374		Type: "TensorSummaryV2",
32375		Input: []tf.Input{
32376			tag, tensor, serialized_summary_metadata,
32377		},
32378	}
32379	op := scope.AddOperation(opspec)
32380	return op.Output(0)
32381}
32382
32383// Advance the counter of a counter-based RNG.
32384//
32385// The state of the RNG after
32386// `rng_read_and_skip(n)` will be the same as that after `uniform([n])`
32387// (or any other distribution). The actual increment added to the
32388// counter is an unspecified implementation choice.
32389//
32390// Arguments:
32391//	resource: The handle of the resource variable that stores the state of the RNG.
32392//	alg: The RNG algorithm.
32393//	delta: The amount of advancement.
32394//
32395// Returns The old value of the resource variable, before incrementing. Since state size is algorithm-dependent, this output will be right-padded with zeros to reach shape int64[3] (the current maximal state size among algorithms).
32396func RngReadAndSkip(scope *Scope, resource tf.Output, alg tf.Output, delta tf.Output) (value tf.Output) {
32397	if scope.Err() != nil {
32398		return
32399	}
32400	opspec := tf.OpSpec{
32401		Type: "RngReadAndSkip",
32402		Input: []tf.Input{
32403			resource, alg, delta,
32404		},
32405	}
32406	op := scope.AddOperation(opspec)
32407	return op.Output(0)
32408}
32409
32410// Multiplies sparse updates into the variable referenced by `resource`.
32411//
32412// This operation computes
32413//
32414//     # Scalar indices
32415//     ref[indices, ...] *= updates[...]
32416//
32417//     # Vector indices (for each i)
32418//     ref[indices[i], ...] *= updates[i, ...]
32419//
32420//     # High rank indices (for each i, ..., j)
32421//     ref[indices[i, ..., j], ...] *= updates[i, ..., j, ...]
32422//
32423// Duplicate entries are handled correctly: if multiple `indices` reference
32424// the same location, their contributions multiply.
32425//
32426// Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.
32427//
32428// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
32429// <img style="width:100%" src='https://www.tensorflow.org/images/ScatterAdd.png' alt>
32430// </div>
32431//
32432// Arguments:
32433//	resource: Should be from a `Variable` node.
32434//	indices: A tensor of indices into the first dimension of `ref`.
//	updates: A tensor of updated values to multiply into `ref`.
32436//
32437// Returns the created operation.
32438func ResourceScatterMul(scope *Scope, resource tf.Output, indices tf.Output, updates tf.Output) (o *tf.Operation) {
32439	if scope.Err() != nil {
32440		return
32441	}
32442	opspec := tf.OpSpec{
32443		Type: "ResourceScatterMul",
32444		Input: []tf.Input{
32445			resource, indices, updates,
32446		},
32447	}
32448	return scope.AddOperation(opspec)
32449}
32450
32451// Compresses a dataset element.
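//
// A minimal usage sketch (illustrative only; it assumes a root Scope `s`):
//
//	components := []tf.Output{Const(s, int32(7)), Const(s, "payload")}
//	compressed := CompressElement(s, components)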
32452func CompressElement(scope *Scope, components []tf.Output) (compressed tf.Output) {
32453	if scope.Err() != nil {
32454		return
32455	}
32456	opspec := tf.OpSpec{
32457		Type: "CompressElement",
32458		Input: []tf.Input{
32459			tf.OutputList(components),
32460		},
32461	}
32462	op := scope.AddOperation(opspec)
32463	return op.Output(0)
32464}
32465
32466// MatMulAttr is an optional argument to MatMul.
32467type MatMulAttr func(optionalAttr)
32468
32469// MatMulTransposeA sets the optional transpose_a attribute to value.
32470//
32471// value: If true, "a" is transposed before multiplication.
32472// If not specified, defaults to false
32473func MatMulTransposeA(value bool) MatMulAttr {
32474	return func(m optionalAttr) {
32475		m["transpose_a"] = value
32476	}
32477}
32478
32479// MatMulTransposeB sets the optional transpose_b attribute to value.
32480//
32481// value: If true, "b" is transposed before multiplication.
32482// If not specified, defaults to false
32483func MatMulTransposeB(value bool) MatMulAttr {
32484	return func(m optionalAttr) {
32485		m["transpose_b"] = value
32486	}
32487}
32488
32489// Multiply the matrix "a" by the matrix "b".
32490//
32491// The inputs must be two-dimensional matrices and the inner dimension of
32492// "a" (after being transposed if transpose_a is true) must match the
// outer dimension of "b" (after being transposed if transpose_b is
32494// true).
32495//
32496// *Note*: The default kernel implementation for MatMul on GPUs uses
// cuBLAS.
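//
// A minimal usage sketch (illustrative only; it assumes a root Scope `s`, e.g.
// from NewScope()):
//
//	a := Const(s, [][]float32{{1, 2}, {3, 4}})
//	b := Const(s, [][]float32{{5, 6}, {7, 8}})
//	product := MatMul(s, a, b, MatMulTransposeB(true))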
32498func MatMul(scope *Scope, a tf.Output, b tf.Output, optional ...MatMulAttr) (product tf.Output) {
32499	if scope.Err() != nil {
32500		return
32501	}
32502	attrs := map[string]interface{}{}
32503	for _, a := range optional {
32504		a(attrs)
32505	}
32506	opspec := tf.OpSpec{
32507		Type: "MatMul",
32508		Input: []tf.Input{
32509			a, b,
32510		},
32511		Attrs: attrs,
32512	}
32513	op := scope.AddOperation(opspec)
32514	return op.Output(0)
32515}
32516
32517// SparseReduceSumSparseAttr is an optional argument to SparseReduceSumSparse.
32518type SparseReduceSumSparseAttr func(optionalAttr)
32519
32520// SparseReduceSumSparseKeepDims sets the optional keep_dims attribute to value.
32521//
32522// value: If true, retain reduced dimensions with length 1.
32523// If not specified, defaults to false
32524func SparseReduceSumSparseKeepDims(value bool) SparseReduceSumSparseAttr {
32525	return func(m optionalAttr) {
32526		m["keep_dims"] = value
32527	}
32528}
32529
32530// Computes the sum of elements across dimensions of a SparseTensor.
32531//
32532// This Op takes a SparseTensor and is the sparse counterpart to
32533// `tf.reduce_sum()`.  In contrast to SparseReduceSum, this Op returns a
32534// SparseTensor.
32535//
32536// Reduces `sp_input` along the dimensions given in `reduction_axes`.  Unless
32537// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
32538// `reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained
32539// with length 1.
32540//
32541// If `reduction_axes` has no entries, all dimensions are reduced, and a tensor
// with a single element is returned.  Additionally, the axes can be negative,
// in which case they are interpreted according to Python indexing rules.
32544//
32545// Arguments:
32546//	input_indices: 2-D.  `N x R` matrix with the indices of non-empty values in a
32547// SparseTensor, possibly not in canonical ordering.
32548//	input_values: 1-D.  `N` non-empty values corresponding to `input_indices`.
32549//	input_shape: 1-D.  Shape of the input SparseTensor.
32550//	reduction_axes: 1-D.  Length-`K` vector containing the reduction axes.
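//
// A minimal usage sketch (illustrative only; it assumes a root Scope `s`). The
// SparseTensor below is 2x3, with 1 at (0,0) and 2 at (1,2), reduced along
// axis 1:
//
//	indices := Const(s, [][]int64{{0, 0}, {1, 2}})
//	values := Const(s, []float32{1, 2})
//	shape := Const(s, []int64{2, 3})
//	axes := Const(s, []int32{1})
//	oi, ov, os := SparseReduceSumSparse(s, indices, values, shape, axes,
//		SparseReduceSumSparseKeepDims(true))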
32551func SparseReduceSumSparse(scope *Scope, input_indices tf.Output, input_values tf.Output, input_shape tf.Output, reduction_axes tf.Output, optional ...SparseReduceSumSparseAttr) (output_indices tf.Output, output_values tf.Output, output_shape tf.Output) {
32552	if scope.Err() != nil {
32553		return
32554	}
32555	attrs := map[string]interface{}{}
32556	for _, a := range optional {
32557		a(attrs)
32558	}
32559	opspec := tf.OpSpec{
32560		Type: "SparseReduceSumSparse",
32561		Input: []tf.Input{
32562			input_indices, input_values, input_shape, reduction_axes,
32563		},
32564		Attrs: attrs,
32565	}
32566	op := scope.AddOperation(opspec)
32567	return op.Output(0), op.Output(1), op.Output(2)
32568}
32569
32570// Computes rectified linear: `max(features, 0)`.
32571//
// See: https://en.wikipedia.org/wiki/Rectifier_(neural_networks)
//
// Example usage:
//
// >>> tf.nn.relu([-2., 0., 3.]).numpy()
// array([0., 0., 3.], dtype=float32)
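//
// A minimal Go usage sketch (illustrative only; it assumes a root Scope `s`):
//
//	x := Const(s, []float32{-2, 0, 3})
//	y := Relu(s, x) // evaluates to [0, 0, 3] when the graph is run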
32576func Relu(scope *Scope, features tf.Output) (activations tf.Output) {
32577	if scope.Err() != nil {
32578		return
32579	}
32580	opspec := tf.OpSpec{
32581		Type: "Relu",
32582		Input: []tf.Input{
32583			features,
32584		},
32585	}
32586	op := scope.AddOperation(opspec)
32587	return op.Output(0)
32588}
32589
32590// Deserialize and concatenate `SparseTensors` from a serialized minibatch.
32591//
32592// The input `serialized_sparse` must be a string matrix of shape `[N x 3]` where
32593// `N` is the minibatch size and the rows correspond to packed outputs of
32594// `SerializeSparse`.  The ranks of the original `SparseTensor` objects
32595// must all match.  When the final `SparseTensor` is created, it has rank one
32596// higher than the ranks of the incoming `SparseTensor` objects
32597// (they have been concatenated along a new row dimension).
32598//
32599// The output `SparseTensor` object's shape values for all dimensions but the
32600// first are the max across the input `SparseTensor` objects' shape values
32601// for the corresponding dimensions.  Its first shape value is `N`, the minibatch
32602// size.
32603//
32604// The input `SparseTensor` objects' indices are assumed ordered in
32605// standard lexicographic order.  If this is not the case, after this
32606// step run `SparseReorder` to restore index ordering.
32607//
32608// For example, if the serialized input is a `[2 x 3]` matrix representing two
32609// original `SparseTensor` objects:
32610//
32611//     index = [ 0]
32612//             [10]
32613//             [20]
32614//     values = [1, 2, 3]
32615//     shape = [50]
32616//
32617// and
32618//
32619//     index = [ 2]
32620//             [10]
32621//     values = [4, 5]
32622//     shape = [30]
32623//
32624// then the final deserialized `SparseTensor` will be:
32625//
32626//     index = [0  0]
32627//             [0 10]
32628//             [0 20]
32629//             [1  2]
32630//             [1 10]
32631//     values = [1, 2, 3, 4, 5]
32632//     shape = [2 50]
32633//
32634// Arguments:
32635//	serialized_sparse: 2-D, The `N` serialized `SparseTensor` objects.
32636// Must have 3 columns.
32637//	dtype: The `dtype` of the serialized `SparseTensor` objects.
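//
// A minimal usage sketch (illustrative only; it assumes a root Scope `s` and
// that `serialized` is an `[N x 3]` string matrix of packed `SerializeSparse`
// outputs for `N` int64-valued `SparseTensor` objects):
//
//	indices, values, shape := DeserializeManySparse(s, serialized, tf.Int64)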
32638func DeserializeManySparse(scope *Scope, serialized_sparse tf.Output, dtype tf.DataType) (sparse_indices tf.Output, sparse_values tf.Output, sparse_shape tf.Output) {
32639	if scope.Err() != nil {
32640		return
32641	}
32642	attrs := map[string]interface{}{"dtype": dtype}
32643	opspec := tf.OpSpec{
32644		Type: "DeserializeManySparse",
32645		Input: []tf.Input{
32646			serialized_sparse,
32647		},
32648		Attrs: attrs,
32649	}
32650	op := scope.AddOperation(opspec)
32651	return op.Output(0), op.Output(1), op.Output(2)
32652}
32653
// Get the number of nodes in a tree.
32655//
32656// Arguments:
32657//	tree_handle: Handle to the tree resource.
32658//
32659// Returns The size of the tree.
32660func TensorForestTreeSize(scope *Scope, tree_handle tf.Output) (tree_size tf.Output) {
32661	if scope.Err() != nil {
32662		return
32663	}
32664	opspec := tf.OpSpec{
32665		Type: "TensorForestTreeSize",
32666		Input: []tf.Input{
32667			tree_handle,
32668		},
32669	}
32670	op := scope.AddOperation(opspec)
32671	return op.Output(0)
32672}
32673
32674// Subtracts sparse updates from the variable referenced by `resource`.
32675//
32676// This operation computes
32677//
32678//     # Scalar indices
32679//     ref[indices, ...] -= updates[...]
32680//
32681//     # Vector indices (for each i)
32682//     ref[indices[i], ...] -= updates[i, ...]
32683//
32684//     # High rank indices (for each i, ..., j)
32685//     ref[indices[i, ..., j], ...] -= updates[i, ..., j, ...]
32686//
// Duplicate entries are handled correctly: if multiple `indices` reference
// the same location, their (negated) contributions add.
32689//
32690// Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.
32691//
32692// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
32693// <img style="width:100%" src='https://www.tensorflow.org/images/ScatterAdd.png' alt>
32694// </div>
32695//
32696// Arguments:
32697//	resource: Should be from a `Variable` node.
32698//	indices: A tensor of indices into the first dimension of `ref`.
//	updates: A tensor of updated values to subtract from `ref`.
32700//
32701// Returns the created operation.
32702func ResourceScatterSub(scope *Scope, resource tf.Output, indices tf.Output, updates tf.Output) (o *tf.Operation) {
32703	if scope.Err() != nil {
32704		return
32705	}
32706	opspec := tf.OpSpec{
32707		Type: "ResourceScatterSub",
32708		Input: []tf.Input{
32709			resource, indices, updates,
32710		},
32711	}
32712	return scope.AddOperation(opspec)
32713}
32714
32715// Creates a dataset that emits each dim-0 slice of `components` once.
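//
// A minimal usage sketch (illustrative only; it assumes a root Scope `s`). Each
// of the three rows of the component becomes one dataset element of shape [2]:
//
//	components := []tf.Output{Const(s, [][]float32{{1, 2}, {3, 4}, {5, 6}})}
//	handle := TensorSliceDataset(s, components, []tf.Shape{tf.MakeShape(2)})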
32716func TensorSliceDataset(scope *Scope, components []tf.Output, output_shapes []tf.Shape) (handle tf.Output) {
32717	if scope.Err() != nil {
32718		return
32719	}
32720	attrs := map[string]interface{}{"output_shapes": output_shapes}
32721	opspec := tf.OpSpec{
32722		Type: "TensorSliceDataset",
32723		Input: []tf.Input{
32724			tf.OutputList(components),
32725		},
32726		Attrs: attrs,
32727	}
32728	op := scope.AddOperation(opspec)
32729	return op.Output(0)
32730}
32731
32732// RetrieveTPUEmbeddingMDLAdagradLightParametersAttr is an optional argument to RetrieveTPUEmbeddingMDLAdagradLightParameters.
32733type RetrieveTPUEmbeddingMDLAdagradLightParametersAttr func(optionalAttr)
32734
32735// RetrieveTPUEmbeddingMDLAdagradLightParametersTableId sets the optional table_id attribute to value.
32736// If not specified, defaults to -1
32737func RetrieveTPUEmbeddingMDLAdagradLightParametersTableId(value int64) RetrieveTPUEmbeddingMDLAdagradLightParametersAttr {
32738	return func(m optionalAttr) {
32739		m["table_id"] = value
32740	}
32741}
32742
32743// RetrieveTPUEmbeddingMDLAdagradLightParametersTableName sets the optional table_name attribute to value.
32744// If not specified, defaults to ""
32745func RetrieveTPUEmbeddingMDLAdagradLightParametersTableName(value string) RetrieveTPUEmbeddingMDLAdagradLightParametersAttr {
32746	return func(m optionalAttr) {
32747		m["table_name"] = value
32748	}
32749}
32750
32751// RetrieveTPUEmbeddingMDLAdagradLightParametersConfig sets the optional config attribute to value.
32752// If not specified, defaults to ""
32753func RetrieveTPUEmbeddingMDLAdagradLightParametersConfig(value string) RetrieveTPUEmbeddingMDLAdagradLightParametersAttr {
32754	return func(m optionalAttr) {
32755		m["config"] = value
32756	}
32757}
32758
32759// Retrieve MDL Adagrad Light embedding parameters.
32760//
32761// An op that retrieves optimization parameters from embedding to host
32762// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
32763// the correct embedding table configuration. For example, this op is
32764// used to retrieve updated parameters before saving a checkpoint.
32765//
32766// Returns:
32767//	parameters: Parameter parameters updated by the MDL Adagrad Light optimization algorithm.
32768//	accumulators: Parameter accumulators updated by the MDL Adagrad Light optimization algorithm.
32769//	weights: Parameter weights updated by the MDL Adagrad Light optimization algorithm.
32770//	benefits: Parameter benefits updated by the MDL Adagrad Light optimization algorithm.
32771func RetrieveTPUEmbeddingMDLAdagradLightParameters(scope *Scope, num_shards int64, shard_id int64, optional ...RetrieveTPUEmbeddingMDLAdagradLightParametersAttr) (parameters tf.Output, accumulators tf.Output, weights tf.Output, benefits tf.Output) {
32772	if scope.Err() != nil {
32773		return
32774	}
32775	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
32776	for _, a := range optional {
32777		a(attrs)
32778	}
32779	opspec := tf.OpSpec{
32780		Type: "RetrieveTPUEmbeddingMDLAdagradLightParameters",
32781
32782		Attrs: attrs,
32783	}
32784	op := scope.AddOperation(opspec)
32785	return op.Output(0), op.Output(1), op.Output(2), op.Output(3)
32786}
32787
32788// RetrieveTPUEmbeddingFrequencyEstimatorParametersGradAccumDebugAttr is an optional argument to RetrieveTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug.
32789type RetrieveTPUEmbeddingFrequencyEstimatorParametersGradAccumDebugAttr func(optionalAttr)
32790
32791// RetrieveTPUEmbeddingFrequencyEstimatorParametersGradAccumDebugTableId sets the optional table_id attribute to value.
32792// If not specified, defaults to -1
32793func RetrieveTPUEmbeddingFrequencyEstimatorParametersGradAccumDebugTableId(value int64) RetrieveTPUEmbeddingFrequencyEstimatorParametersGradAccumDebugAttr {
32794	return func(m optionalAttr) {
32795		m["table_id"] = value
32796	}
32797}
32798
32799// RetrieveTPUEmbeddingFrequencyEstimatorParametersGradAccumDebugTableName sets the optional table_name attribute to value.
32800// If not specified, defaults to ""
32801func RetrieveTPUEmbeddingFrequencyEstimatorParametersGradAccumDebugTableName(value string) RetrieveTPUEmbeddingFrequencyEstimatorParametersGradAccumDebugAttr {
32802	return func(m optionalAttr) {
32803		m["table_name"] = value
32804	}
32805}
32806
32807// RetrieveTPUEmbeddingFrequencyEstimatorParametersGradAccumDebugConfig sets the optional config attribute to value.
32808// If not specified, defaults to ""
32809func RetrieveTPUEmbeddingFrequencyEstimatorParametersGradAccumDebugConfig(value string) RetrieveTPUEmbeddingFrequencyEstimatorParametersGradAccumDebugAttr {
32810	return func(m optionalAttr) {
32811		m["config"] = value
32812	}
32813}
32814
32815// Retrieve frequency estimator embedding parameters with debug support.
32816//
32817// An op that retrieves optimization parameters from embedding to host
32818// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
32819// the correct embedding table configuration. For example, this op is
32820// used to retrieve updated parameters before saving a checkpoint.
32821//
32822// Returns:
32823//	parameters: Parameter parameters updated by the frequency estimator optimization algorithm.
32824//	last_hit_step: Parameter last_hit_step updated by the frequency estimator optimization
32825// algorithm.
32826//	gradient_accumulators: Parameter gradient_accumulators updated by the frequency estimator optimization
32827// algorithm.
32828func RetrieveTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug(scope *Scope, num_shards int64, shard_id int64, optional ...RetrieveTPUEmbeddingFrequencyEstimatorParametersGradAccumDebugAttr) (parameters tf.Output, last_hit_step tf.Output, gradient_accumulators tf.Output) {
32829	if scope.Err() != nil {
32830		return
32831	}
32832	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
32833	for _, a := range optional {
32834		a(attrs)
32835	}
32836	opspec := tf.OpSpec{
32837		Type: "RetrieveTPUEmbeddingFrequencyEstimatorParametersGradAccumDebug",
32838
32839		Attrs: attrs,
32840	}
32841	op := scope.AddOperation(opspec)
32842	return op.Output(0), op.Output(1), op.Output(2)
32843}
32844
32845// Adds sparse updates to the variable referenced by `resource`.
32846//
32847// This operation computes
32848//
32849//     # Scalar indices
32850//     ref[indices, ...] += updates[...]
32851//
32852//     # Vector indices (for each i)
32853//     ref[indices[i], ...] += updates[i, ...]
32854//
32855//     # High rank indices (for each i, ..., j)
32856//     ref[indices[i, ..., j], ...] += updates[i, ..., j, ...]
32857//
32858// Duplicate entries are handled correctly: if multiple `indices` reference
32859// the same location, their contributions add.
32860//
32861// Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.
32862//
32863// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
32864// <img style="width:100%" src='https://www.tensorflow.org/images/ScatterAdd.png' alt>
32865// </div>
32866//
32867// Arguments:
32868//	resource: Should be from a `Variable` node.
32869//	indices: A tensor of indices into the first dimension of `ref`.
32870//	updates: A tensor of updated values to add to `ref`.
32871//
32872// Returns the created operation.
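//
// A minimal usage sketch (illustrative only; it assumes a root Scope `s` and an
// initialized variable). Both updates target row 0, so their contributions add:
//
//	v := VarHandleOp(s, tf.Float, tf.MakeShape(2))
//	indices := Const(s, []int32{0, 0})
//	updates := Const(s, []float32{1, 2})
//	add := ResourceScatterAdd(s, v, indices, updates)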
32873func ResourceScatterAdd(scope *Scope, resource tf.Output, indices tf.Output, updates tf.Output) (o *tf.Operation) {
32874	if scope.Err() != nil {
32875		return
32876	}
32877	opspec := tf.OpSpec{
32878		Type: "ResourceScatterAdd",
32879		Input: []tf.Input{
32880			resource, indices, updates,
32881		},
32882	}
32883	return scope.AddOperation(opspec)
32884}
32885
32886// This op consumes a lock created by `MutexLock`.
32887//
32888// This op exists to consume a tensor created by `MutexLock` (other than
// direct control dependencies).  It should be the only op that consumes the tensor,
32890// and will raise an error if it is not.  Its only purpose is to keep the
32891// mutex lock tensor alive until it is consumed by this op.
32892//
32893// **NOTE**: This operation must run on the same device as its input.  This may
32894// be enforced via the `colocate_with` mechanism.
32895//
32896// Arguments:
32897//	mutex_lock: A tensor returned by `MutexLock`.
32898//
32899// Returns the created operation.
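//
// A minimal usage sketch (illustrative only; it assumes a root Scope `s`;
// MutexLock is the companion wrapper assumed to be generated elsewhere in this
// package):
//
//	mu := MutexV2(s)
//	lock := MutexLock(s, mu)
//	done := ConsumeMutexLock(s, lock)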
32900func ConsumeMutexLock(scope *Scope, mutex_lock tf.Output) (o *tf.Operation) {
32901	if scope.Err() != nil {
32902		return
32903	}
32904	opspec := tf.OpSpec{
32905		Type: "ConsumeMutexLock",
32906		Input: []tf.Input{
32907			mutex_lock,
32908		},
32909	}
32910	return scope.AddOperation(opspec)
32911}
32912
32913// BiasAddAttr is an optional argument to BiasAdd.
32914type BiasAddAttr func(optionalAttr)
32915
32916// BiasAddDataFormat sets the optional data_format attribute to value.
32917//
32918// value: Specify the data format of the input and output data. With the
32919// default format "NHWC", the bias tensor will be added to the last dimension
32920// of the value tensor.
32921// Alternatively, the format could be "NCHW", the data storage order of:
32922//     [batch, in_channels, in_height, in_width].
// The bias tensor will be added to "in_channels", the third-to-last
// dimension.
32925// If not specified, defaults to "NHWC"
32926func BiasAddDataFormat(value string) BiasAddAttr {
32927	return func(m optionalAttr) {
32928		m["data_format"] = value
32929	}
32930}
32931
32932// Adds `bias` to `value`.
32933//
32934// This is a special case of `tf.add` where `bias` is restricted to be 1-D.
32935// Broadcasting is supported, so `value` may have any number of dimensions.
32936//
32937// Arguments:
32938//	value: Any number of dimensions.
32939//	bias: 1-D with size the last dimension of `value`.
32940//
32941// Returns Broadcasted sum of `value` and `bias`.
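//
// A minimal usage sketch (illustrative only; it assumes a root Scope `s`):
//
//	value := Const(s, [][]float32{{1, 2, 3}, {4, 5, 6}})
//	bias := Const(s, []float32{0.1, 0.2, 0.3})
//	out := BiasAdd(s, value, bias)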
32942func BiasAdd(scope *Scope, value tf.Output, bias tf.Output, optional ...BiasAddAttr) (output tf.Output) {
32943	if scope.Err() != nil {
32944		return
32945	}
32946	attrs := map[string]interface{}{}
32947	for _, a := range optional {
32948		a(attrs)
32949	}
32950	opspec := tf.OpSpec{
32951		Type: "BiasAdd",
32952		Input: []tf.Input{
32953			value, bias,
32954		},
32955		Attrs: attrs,
32956	}
32957	op := scope.AddOperation(opspec)
32958	return op.Output(0)
32959}
32960
32961// VariableShapeAttr is an optional argument to VariableShape.
32962type VariableShapeAttr func(optionalAttr)
32963
32964// VariableShapeOutType sets the optional out_type attribute to value.
32965// If not specified, defaults to DT_INT32
32966func VariableShapeOutType(value tf.DataType) VariableShapeAttr {
32967	return func(m optionalAttr) {
32968		m["out_type"] = value
32969	}
32970}
32971
32972// Returns the shape of the variable pointed to by `resource`.
32973//
32974// This operation returns a 1-D integer tensor representing the shape of `input`.
32975//
32976// For example:
32977//
32978// ```
32979// # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
32980// shape(t) ==> [2, 2, 3]
32981// ```
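//
// A minimal Go usage sketch (illustrative only; it assumes a root Scope `s` and
// that the variable is initialized before the graph is run):
//
//	v := VarHandleOp(s, tf.Float, tf.MakeShape(2, 2, 3))
//	shape := VariableShape(s, v, VariableShapeOutType(tf.Int64))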
32982func VariableShape(scope *Scope, input tf.Output, optional ...VariableShapeAttr) (output tf.Output) {
32983	if scope.Err() != nil {
32984		return
32985	}
32986	attrs := map[string]interface{}{}
32987	for _, a := range optional {
32988		a(attrs)
32989	}
32990	opspec := tf.OpSpec{
32991		Type: "VariableShape",
32992		Input: []tf.Input{
32993			input,
32994		},
32995		Attrs: attrs,
32996	}
32997	op := scope.AddOperation(opspec)
32998	return op.Output(0)
32999}
33000
33001// Wraps the XLA Sort operator, documented at
33002//
33003//  https://www.tensorflow.org/performance/xla/operation_semantics#sort
33004// .
33005//
33006// Sorts a tensor. Currently only sorts in ascending order are supported.
33007//
33008// Arguments:
33009//	keys: A `Tensor` of type K.
33010//	values: A `Tensor` of type V.
33011//
33012// Returns:
33013//	sorted_keys: A `Tensor` of type K.
33014//	sorted_values: A `Tensor` of type V.
33015func XlaKeyValueSort(scope *Scope, keys tf.Output, values tf.Output) (sorted_keys tf.Output, sorted_values tf.Output) {
33016	if scope.Err() != nil {
33017		return
33018	}
33019	opspec := tf.OpSpec{
33020		Type: "XlaKeyValueSort",
33021		Input: []tf.Input{
33022			keys, values,
33023		},
33024	}
33025	op := scope.AddOperation(opspec)
33026	return op.Output(0), op.Output(1)
33027}
33028
// Scrambles seed into key and counter, using the best algorithm for the device.
33030//
// This op scrambles a shape-[2] seed into a key and a counter, both needed by
// counter-based RNG algorithms. The scrambling uses the best algorithm based
// on the device. The scrambling is opaque but approximately satisfies the
// property that different seeds result in different key/counter pairs (which
// in turn result in different random numbers).
33032//
33033// Arguments:
33034//	seed: 2 seeds (shape [2]).
33035//
33036// Returns:
33037//	key: Key for the counter-based RNG algorithm (shape uint64[1]).
33038//	counter: Counter for the counter-based RNG algorithm. Since counter size is algorithm-dependent, this output will be right-padded with zeros to reach shape uint64[2] (the current maximal counter size among algorithms).
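//
// A minimal usage sketch (illustrative only; it assumes a root Scope `s`):
//
//	seed := Const(s, []int64{1, 2})
//	key, counter := StatelessRandomGetKeyCounter(s, seed)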
33039func StatelessRandomGetKeyCounter(scope *Scope, seed tf.Output) (key tf.Output, counter tf.Output) {
33040	if scope.Err() != nil {
33041		return
33042	}
33043	opspec := tf.OpSpec{
33044		Type: "StatelessRandomGetKeyCounter",
33045		Input: []tf.Input{
33046			seed,
33047		},
33048	}
33049	op := scope.AddOperation(opspec)
33050	return op.Output(0), op.Output(1)
33051}
33052
33053// Asserts that compilation succeeded. This op produces no output and closes the
33054//
// device on failure to ensure all pending device interactions fail.
33056//
33057// 'compilation_status' is a serialized CompilationResultProto.
33058//
33059// Returns the created operation.
33060func TPUCompileSucceededAssert(scope *Scope, compilation_status tf.Output) (o *tf.Operation) {
33061	if scope.Err() != nil {
33062		return
33063	}
33064	opspec := tf.OpSpec{
33065		Type: "TPUCompileSucceededAssert",
33066		Input: []tf.Input{
33067			compilation_status,
33068		},
33069	}
33070	return scope.AddOperation(opspec)
33071}
33072
33073// ComputeAccidentalHitsAttr is an optional argument to ComputeAccidentalHits.
33074type ComputeAccidentalHitsAttr func(optionalAttr)
33075
33076// ComputeAccidentalHitsSeed sets the optional seed attribute to value.
33077//
33078// value: If either seed or seed2 are set to be non-zero, the random number
33079// generator is seeded by the given seed.  Otherwise, it is seeded by a
33080// random seed.
33081// If not specified, defaults to 0
33082func ComputeAccidentalHitsSeed(value int64) ComputeAccidentalHitsAttr {
33083	return func(m optionalAttr) {
33084		m["seed"] = value
33085	}
33086}
33087
33088// ComputeAccidentalHitsSeed2 sets the optional seed2 attribute to value.
33089//
// value: A second seed to avoid seed collision.
33091// If not specified, defaults to 0
33092func ComputeAccidentalHitsSeed2(value int64) ComputeAccidentalHitsAttr {
33093	return func(m optionalAttr) {
33094		m["seed2"] = value
33095	}
33096}
33097
33098// Computes the ids of the positions in sampled_candidates that match true_labels.
33099//
33100// When doing log-odds NCE, the result of this op should be passed through a
33101// SparseToDense op, then added to the logits of the sampled candidates. This has
33102// the effect of 'removing' the sampled labels that match the true labels by
33103// making the classifier sure that they are sampled labels.
33104//
33105// Arguments:
33106//	true_classes: The true_classes output of UnpackSparseLabels.
33107//	sampled_candidates: The sampled_candidates output of CandidateSampler.
33108//	num_true: Number of true labels per context.
33109//
33110// Returns:
//	indices: A vector of indices corresponding to rows of true_classes.
33112//	ids: A vector of IDs of positions in sampled_candidates that match a true_label
33113// for the row with the corresponding index in indices.
33114//	weights: A vector of the same length as indices and ids, in which each element
33115// is -FLOAT_MAX.
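//
// A minimal usage sketch (illustrative only; it assumes a root Scope `s`;
// num_true is 1, matching the second dimension of true_classes):
//
//	trueClasses := Const(s, [][]int64{{0}, {3}})
//	sampled := Const(s, []int64{1, 3, 5})
//	indices, ids, weights := ComputeAccidentalHits(s, trueClasses, sampled, 1)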
33116func ComputeAccidentalHits(scope *Scope, true_classes tf.Output, sampled_candidates tf.Output, num_true int64, optional ...ComputeAccidentalHitsAttr) (indices tf.Output, ids tf.Output, weights tf.Output) {
33117	if scope.Err() != nil {
33118		return
33119	}
33120	attrs := map[string]interface{}{"num_true": num_true}
33121	for _, a := range optional {
33122		a(attrs)
33123	}
33124	opspec := tf.OpSpec{
33125		Type: "ComputeAccidentalHits",
33126		Input: []tf.Input{
33127			true_classes, sampled_candidates,
33128		},
33129		Attrs: attrs,
33130	}
33131	op := scope.AddOperation(opspec)
33132	return op.Output(0), op.Output(1), op.Output(2)
33133}
33134
33135// VarHandleOpAttr is an optional argument to VarHandleOp.
33136type VarHandleOpAttr func(optionalAttr)
33137
33138// VarHandleOpContainer sets the optional container attribute to value.
33139//
33140// value: the container this variable is placed in.
33141// If not specified, defaults to ""
33142func VarHandleOpContainer(value string) VarHandleOpAttr {
33143	return func(m optionalAttr) {
33144		m["container"] = value
33145	}
33146}
33147
33148// VarHandleOpSharedName sets the optional shared_name attribute to value.
33149//
33150// value: the name by which this variable is referred to.
33151// If not specified, defaults to ""
33152func VarHandleOpSharedName(value string) VarHandleOpAttr {
33153	return func(m optionalAttr) {
33154		m["shared_name"] = value
33155	}
33156}
33157
33158// VarHandleOpAllowedDevices sets the optional allowed_devices attribute to value.
33159//
33160// value: DEPRECATED. The allowed devices containing the resource variable. Set when the
33161// output ResourceHandle represents a per-replica/partitioned resource variable.
33162// If not specified, defaults to <>
33163func VarHandleOpAllowedDevices(value []string) VarHandleOpAttr {
33164	return func(m optionalAttr) {
33165		m["allowed_devices"] = value
33166	}
33167}
33168
33169// Creates a handle to a Variable resource.
33170//
33171// Arguments:
33172//	dtype: the type of this variable. Must agree with the dtypes
33173// of all ops using this variable.
33174//	shape: The (possibly partially specified) shape of this variable.
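//
// A minimal usage sketch (illustrative only; it assumes a root Scope `s`; the
// shared name "w" is just an example value):
//
//	v := VarHandleOp(s, tf.Float, tf.MakeShape(2, 3),
//		VarHandleOpSharedName("w"))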
33175func VarHandleOp(scope *Scope, dtype tf.DataType, shape tf.Shape, optional ...VarHandleOpAttr) (resource tf.Output) {
33176	if scope.Err() != nil {
33177		return
33178	}
33179	attrs := map[string]interface{}{"dtype": dtype, "shape": shape}
33180	for _, a := range optional {
33181		a(attrs)
33182	}
33183	opspec := tf.OpSpec{
33184		Type: "VarHandleOp",
33185
33186		Attrs: attrs,
33187	}
33188	op := scope.AddOperation(opspec)
33189	return op.Output(0)
33190}
33191
33192// BoostedTreesQuantileStreamResourceHandleOpAttr is an optional argument to BoostedTreesQuantileStreamResourceHandleOp.
33193type BoostedTreesQuantileStreamResourceHandleOpAttr func(optionalAttr)
33194
33195// BoostedTreesQuantileStreamResourceHandleOpContainer sets the optional container attribute to value.
33196// If not specified, defaults to ""
33197func BoostedTreesQuantileStreamResourceHandleOpContainer(value string) BoostedTreesQuantileStreamResourceHandleOpAttr {
33198	return func(m optionalAttr) {
33199		m["container"] = value
33200	}
33201}
33202
33203// BoostedTreesQuantileStreamResourceHandleOpSharedName sets the optional shared_name attribute to value.
33204// If not specified, defaults to ""
33205func BoostedTreesQuantileStreamResourceHandleOpSharedName(value string) BoostedTreesQuantileStreamResourceHandleOpAttr {
33206	return func(m optionalAttr) {
33207		m["shared_name"] = value
33208	}
33209}
33210
33211// Creates a handle to a BoostedTreesQuantileStreamResource.
33212func BoostedTreesQuantileStreamResourceHandleOp(scope *Scope, optional ...BoostedTreesQuantileStreamResourceHandleOpAttr) (resource tf.Output) {
33213	if scope.Err() != nil {
33214		return
33215	}
33216	attrs := map[string]interface{}{}
33217	for _, a := range optional {
33218		a(attrs)
33219	}
33220	opspec := tf.OpSpec{
33221		Type: "BoostedTreesQuantileStreamResourceHandleOp",
33222
33223		Attrs: attrs,
33224	}
33225	op := scope.AddOperation(opspec)
33226	return op.Output(0)
33227}
33228
33229// XlaShardingAttr is an optional argument to XlaSharding.
33230type XlaShardingAttr func(optionalAttr)
33231
33232// XlaShardingSharding sets the optional sharding attribute to value.
33233// If not specified, defaults to ""
33234func XlaShardingSharding(value string) XlaShardingAttr {
33235	return func(m optionalAttr) {
33236		m["sharding"] = value
33237	}
33238}
33239
33240// An op which shards the input based on the given sharding attribute.
33241func XlaSharding(scope *Scope, input tf.Output, optional ...XlaShardingAttr) (output tf.Output) {
33242	if scope.Err() != nil {
33243		return
33244	}
33245	attrs := map[string]interface{}{}
33246	for _, a := range optional {
33247		a(attrs)
33248	}
33249	opspec := tf.OpSpec{
33250		Type: "XlaSharding",
33251		Input: []tf.Input{
33252			input,
33253		},
33254		Attrs: attrs,
33255	}
33256	op := scope.AddOperation(opspec)
33257	return op.Output(0)
33258}
33259
33260// EagerPyFuncAttr is an optional argument to EagerPyFunc.
33261type EagerPyFuncAttr func(optionalAttr)
33262
33263// EagerPyFuncIsAsync sets the optional is_async attribute to value.
33264// If not specified, defaults to false
33265func EagerPyFuncIsAsync(value bool) EagerPyFuncAttr {
33266	return func(m optionalAttr) {
33267		m["is_async"] = value
33268	}
33269}
33270
// Eagerly executes a Python function to compute func(input)->output. The
33272//
33273// semantics of the input, output, and attributes are the same as those for
33274// PyFunc.
33275func EagerPyFunc(scope *Scope, input []tf.Output, token string, Tout []tf.DataType, optional ...EagerPyFuncAttr) (output []tf.Output) {
33276	if scope.Err() != nil {
33277		return
33278	}
33279	attrs := map[string]interface{}{"token": token, "Tout": Tout}
33280	for _, a := range optional {
33281		a(attrs)
33282	}
33283	opspec := tf.OpSpec{
33284		Type: "EagerPyFunc",
33285		Input: []tf.Input{
33286			tf.OutputList(input),
33287		},
33288		Attrs: attrs,
33289	}
33290	op := scope.AddOperation(opspec)
33291	if scope.Err() != nil {
33292		return
33293	}
33294	var idx int
33295	var err error
33296	if output, idx, err = makeOutputList(op, idx, "output"); err != nil {
33297		scope.UpdateErr("EagerPyFunc", err)
33298		return
33299	}
33300	return output
33301}
33302
33303// SdcaOptimizerV2Attr is an optional argument to SdcaOptimizerV2.
33304type SdcaOptimizerV2Attr func(optionalAttr)
33305
33306// SdcaOptimizerV2Adaptive sets the optional adaptive attribute to value.
33307//
33308// value: Whether to use Adaptive SDCA for the inner loop.
33309// If not specified, defaults to true
33310func SdcaOptimizerV2Adaptive(value bool) SdcaOptimizerV2Attr {
33311	return func(m optionalAttr) {
33312		m["adaptive"] = value
33313	}
33314}
33315
33316// Distributed version of Stochastic Dual Coordinate Ascent (SDCA) optimizer for
33317//
// linear models with L1 + L2 regularization. As the global optimization objective
// is strongly-convex, the optimizer optimizes the dual objective at each step. The
// optimizer applies each update one example at a time. Examples are sampled
// uniformly, and the optimizer is learning-rate free and enjoys a linear
// convergence rate.
33323//
33324// [Proximal Stochastic Dual Coordinate Ascent](http://arxiv.org/pdf/1211.2717v1.pdf).<br>
33325// Shai Shalev-Shwartz, Tong Zhang. 2012
33326//
33327// $$Loss Objective = \sum f_{i} (wx_{i}) + (l2 / 2) * |w|^2 + l1 * |w|$$
33328//
33329// [Adding vs. Averaging in Distributed Primal-Dual Optimization](http://arxiv.org/abs/1502.03508).<br>
33330// Chenxin Ma, Virginia Smith, Martin Jaggi, Michael I. Jordan,
33331// Peter Richtarik, Martin Takac. 2015
33332//
33333// [Stochastic Dual Coordinate Ascent with Adaptive Probabilities](https://arxiv.org/abs/1502.08053).<br>
33334// Dominik Csiba, Zheng Qu, Peter Richtarik. 2015
33335//
33336// Arguments:
33337//	sparse_example_indices: a list of vectors which contain example indices.
33338//	sparse_feature_indices: a list of vectors which contain feature indices.
33339//	sparse_feature_values: a list of vectors which contains feature value
33340// associated with each feature group.
33341//	dense_features: a list of matrices which contains the dense feature values.
33342//	example_weights: a vector which contains the weight associated with each
33343// example.
33344//	example_labels: a vector which contains the label/target associated with each
33345// example.
33346//	sparse_indices: a list of vectors where each value is the indices which has
// corresponding weights in sparse_weights. This field may be omitted for the
33348// dense approach.
33349//	sparse_weights: a list of vectors where each value is the weight associated with
33350// a sparse feature group.
33351//	dense_weights: a list of vectors where the values are the weights associated
33352// with a dense feature group.
33353//	example_state_data: a list of vectors containing the example state data.
33354//	loss_type: Type of the primal loss. Currently SdcaSolver supports logistic,
33355// squared and hinge losses.
33356//	l1: Symmetric l1 regularization strength.
33357//	l2: Symmetric l2 regularization strength.
33358//	num_loss_partitions: Number of partitions of the global loss function.
33359//	num_inner_iterations: Number of iterations per mini-batch.
33360//
33361// Returns:
33362//	out_example_state_data: a list of vectors containing the updated example state
33363// data.
33364//	out_delta_sparse_weights: a list of vectors where each value is the delta
33365// weights associated with a sparse feature group.
33366//	out_delta_dense_weights: a list of vectors where the values are the delta
33367// weights associated with a dense feature group.
33368func SdcaOptimizerV2(scope *Scope, sparse_example_indices []tf.Output, sparse_feature_indices []tf.Output, sparse_feature_values []tf.Output, dense_features []tf.Output, example_weights tf.Output, example_labels tf.Output, sparse_indices []tf.Output, sparse_weights []tf.Output, dense_weights []tf.Output, example_state_data tf.Output, loss_type string, l1 float32, l2 float32, num_loss_partitions int64, num_inner_iterations int64, optional ...SdcaOptimizerV2Attr) (out_example_state_data tf.Output, out_delta_sparse_weights []tf.Output, out_delta_dense_weights []tf.Output) {
33369	if scope.Err() != nil {
33370		return
33371	}
33372	attrs := map[string]interface{}{"loss_type": loss_type, "l1": l1, "l2": l2, "num_loss_partitions": num_loss_partitions, "num_inner_iterations": num_inner_iterations}
33373	for _, a := range optional {
33374		a(attrs)
33375	}
33376	opspec := tf.OpSpec{
33377		Type: "SdcaOptimizerV2",
33378		Input: []tf.Input{
33379			tf.OutputList(sparse_example_indices), tf.OutputList(sparse_feature_indices), tf.OutputList(sparse_feature_values), tf.OutputList(dense_features), example_weights, example_labels, tf.OutputList(sparse_indices), tf.OutputList(sparse_weights), tf.OutputList(dense_weights), example_state_data,
33380		},
33381		Attrs: attrs,
33382	}
33383	op := scope.AddOperation(opspec)
33384	if scope.Err() != nil {
33385		return
33386	}
33387	var idx int
33388	var err error
33389	out_example_state_data = op.Output(idx)
33390	if out_delta_sparse_weights, idx, err = makeOutputList(op, idx, "out_delta_sparse_weights"); err != nil {
33391		scope.UpdateErr("SdcaOptimizerV2", err)
33392		return
33393	}
33394	if out_delta_dense_weights, idx, err = makeOutputList(op, idx, "out_delta_dense_weights"); err != nil {
33395		scope.UpdateErr("SdcaOptimizerV2", err)
33396		return
33397	}
33398	return out_example_state_data, out_delta_sparse_weights, out_delta_dense_weights
33399}
33400
33401// MaxPool3DGradGradAttr is an optional argument to MaxPool3DGradGrad.
33402type MaxPool3DGradGradAttr func(optionalAttr)
33403
33404// MaxPool3DGradGradDataFormat sets the optional data_format attribute to value.
33405//
33406// value: The data format of the input and output data. With the
33407// default format "NDHWC", the data is stored in the order of:
33408//     [batch, in_depth, in_height, in_width, in_channels].
33409// Alternatively, the format could be "NCDHW", the data storage order is:
33410//     [batch, in_channels, in_depth, in_height, in_width].
33411// If not specified, defaults to "NDHWC"
33412func MaxPool3DGradGradDataFormat(value string) MaxPool3DGradGradAttr {
33413	return func(m optionalAttr) {
33414		m["data_format"] = value
33415	}
33416}
33417
33418// Computes second-order gradients of the maxpooling function.
33419//
33420// Arguments:
33421//	orig_input: The original input tensor.
33422//	orig_output: The original output tensor.
33423//	grad: Output backprop of shape `[batch, depth, rows, cols, channels]`.
33424//	ksize: 1-D tensor of length 5. The size of the window for each dimension of
33425// the input tensor. Must have `ksize[0] = ksize[4] = 1`.
33426//	strides: 1-D tensor of length 5. The stride of the sliding window for each
33427// dimension of `input`. Must have `strides[0] = strides[4] = 1`.
33428//	padding: The type of padding algorithm to use.
33429//
33430// Returns Gradients of gradients w.r.t. the input to `max_pool`.
33431func MaxPool3DGradGrad(scope *Scope, orig_input tf.Output, orig_output tf.Output, grad tf.Output, ksize []int64, strides []int64, padding string, optional ...MaxPool3DGradGradAttr) (output tf.Output) {
33432	if scope.Err() != nil {
33433		return
33434	}
33435	attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
33436	for _, a := range optional {
33437		a(attrs)
33438	}
33439	opspec := tf.OpSpec{
33440		Type: "MaxPool3DGradGrad",
33441		Input: []tf.Input{
33442			orig_input, orig_output, grad,
33443		},
33444		Attrs: attrs,
33445	}
33446	op := scope.AddOperation(opspec)
33447	return op.Output(0)
33448}
33449
33450// IgnoreErrorsDatasetAttr is an optional argument to IgnoreErrorsDataset.
33451type IgnoreErrorsDatasetAttr func(optionalAttr)
33452
33453// IgnoreErrorsDatasetLogWarning sets the optional log_warning attribute to value.
33454// If not specified, defaults to false
33455func IgnoreErrorsDatasetLogWarning(value bool) IgnoreErrorsDatasetAttr {
33456	return func(m optionalAttr) {
33457		m["log_warning"] = value
33458	}
33459}
33460
33461// Creates a dataset that contains the elements of `input_dataset` ignoring errors.
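//
// A minimal usage sketch (illustrative only; it assumes a root Scope `s`; the
// input dataset here is built with TensorSliceDataset):
//
//	ds := TensorSliceDataset(s, []tf.Output{Const(s, []int32{1, 2, 3})},
//		[]tf.Shape{tf.ScalarShape()})
//	safe := IgnoreErrorsDataset(s, ds, []tf.DataType{tf.Int32},
//		[]tf.Shape{tf.ScalarShape()})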
33462func IgnoreErrorsDataset(scope *Scope, input_dataset tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...IgnoreErrorsDatasetAttr) (handle tf.Output) {
33463	if scope.Err() != nil {
33464		return
33465	}
33466	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
33467	for _, a := range optional {
33468		a(attrs)
33469	}
33470	opspec := tf.OpSpec{
33471		Type: "IgnoreErrorsDataset",
33472		Input: []tf.Input{
33473			input_dataset,
33474		},
33475		Attrs: attrs,
33476	}
33477	op := scope.AddOperation(opspec)
33478	return op.Output(0)
33479}
33480
// Deprecated. Use TensorArrayWriteV3
33482//
33483// DEPRECATED at GraphDef version 26: Use TensorArrayWriteV3
33484func TensorArrayWriteV2(scope *Scope, handle tf.Output, index tf.Output, value tf.Output, flow_in tf.Output) (flow_out tf.Output) {
33485	if scope.Err() != nil {
33486		return
33487	}
33488	opspec := tf.OpSpec{
33489		Type: "TensorArrayWriteV2",
33490		Input: []tf.Input{
33491			handle, index, value, flow_in,
33492		},
33493	}
33494	op := scope.AddOperation(opspec)
33495	return op.Output(0)
33496}
33497
33498// DenseToSparseSetOperationAttr is an optional argument to DenseToSparseSetOperation.
33499type DenseToSparseSetOperationAttr func(optionalAttr)
33500
33501// DenseToSparseSetOperationValidateIndices sets the optional validate_indices attribute to value.
33502// If not specified, defaults to true
33503func DenseToSparseSetOperationValidateIndices(value bool) DenseToSparseSetOperationAttr {
33504	return func(m optionalAttr) {
33505		m["validate_indices"] = value
33506	}
33507}
33508
33509// Applies set operation along last dimension of `Tensor` and `SparseTensor`.
33510//
33511// See SetOperationOp::SetOperationFromContext for values of `set_operation`.
33512//
33513// Input `set2` is a `SparseTensor` represented by `set2_indices`, `set2_values`,
33514// and `set2_shape`. For `set2` ranked `n`, 1st `n-1` dimensions must be the same
33515// as `set1`. Dimension `n` contains values in a set, duplicates are allowed but
33516// ignored.
33517//
33518// If `validate_indices` is `True`, this op validates the order and range of `set2`
33519// indices.
33520//
33521// Output `result` is a `SparseTensor` represented by `result_indices`,
33522// `result_values`, and `result_shape`. For `set1` and `set2` ranked `n`, this
33523// has rank `n` and the same 1st `n-1` dimensions as `set1` and `set2`. The `nth`
33524// dimension contains the result of `set_operation` applied to the corresponding
33525// `[0...n-1]` dimension of `set`.
33526//
33527// Arguments:
33528//	set1: `Tensor` with rank `n`. 1st `n-1` dimensions must be the same as `set2`.
33529// Dimension `n` contains values in a set, duplicates are allowed but ignored.
33530//	set2_indices: 2D `Tensor`, indices of a `SparseTensor`. Must be in row-major
33531// order.
33532//	set2_values: 1D `Tensor`, values of a `SparseTensor`. Must be in row-major
33533// order.
33534//	set2_shape: 1D `Tensor`, shape of a `SparseTensor`. `set2_shape[0...n-1]` must
33535// be the same as the 1st `n-1` dimensions of `set1`, `result_shape[n]` is the
33536// max set size across `n-1` dimensions.
33537//
33538//
33539// Returns:
33540//	result_indices: 2D indices of a `SparseTensor`.
33541//	result_values: 1D values of a `SparseTensor`.
33542//	result_shape: 1D `Tensor` shape of a `SparseTensor`. `result_shape[0...n-1]` is
33543// the same as the 1st `n-1` dimensions of `set1` and `set2`, `result_shape[n]`
33544// is the max result set size across all `0...n-1` dimensions.
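//
// A minimal usage sketch (illustrative only; it assumes a root Scope `s`;
// "intersection" is one of the supported set_operation values):
//
//	set1 := Const(s, [][]int64{{1, 2}, {3, 4}})
//	set2Indices := Const(s, [][]int64{{0, 0}, {1, 0}})
//	set2Values := Const(s, []int64{2, 5})
//	set2Shape := Const(s, []int64{2, 1})
//	ri, rv, rs := DenseToSparseSetOperation(s, set1, set2Indices, set2Values,
//		set2Shape, "intersection")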
33545func DenseToSparseSetOperation(scope *Scope, set1 tf.Output, set2_indices tf.Output, set2_values tf.Output, set2_shape tf.Output, set_operation string, optional ...DenseToSparseSetOperationAttr) (result_indices tf.Output, result_values tf.Output, result_shape tf.Output) {
33546	if scope.Err() != nil {
33547		return
33548	}
33549	attrs := map[string]interface{}{"set_operation": set_operation}
33550	for _, a := range optional {
33551		a(attrs)
33552	}
33553	opspec := tf.OpSpec{
33554		Type: "DenseToSparseSetOperation",
33555		Input: []tf.Input{
33556			set1, set2_indices, set2_values, set2_shape,
33557		},
33558		Attrs: attrs,
33559	}
33560	op := scope.AddOperation(opspec)
33561	return op.Output(0), op.Output(1), op.Output(2)
33562}
33563
33564// MutexV2Attr is an optional argument to MutexV2.
33565type MutexV2Attr func(optionalAttr)
33566
33567// MutexV2Container sets the optional container attribute to value.
33568//
33569// value: If non-empty, this variable is placed in the given container.
33570// Otherwise, a default container is used.
33571// If not specified, defaults to ""
33572func MutexV2Container(value string) MutexV2Attr {
33573	return func(m optionalAttr) {
33574		m["container"] = value
33575	}
33576}
33577
33578// MutexV2SharedName sets the optional shared_name attribute to value.
33579//
33580// value: If non-empty, this variable is named in the given bucket
33581// with this shared_name. Otherwise, the node name is used instead.
33582// If not specified, defaults to ""
33583func MutexV2SharedName(value string) MutexV2Attr {
33584	return func(m optionalAttr) {
33585		m["shared_name"] = value
33586	}
33587}
33588
33589// Creates a Mutex resource that can be locked by `MutexLock`.
33590//
33591// Returns The mutex resource.
33592func MutexV2(scope *Scope, optional ...MutexV2Attr) (resource tf.Output) {
33593	if scope.Err() != nil {
33594		return
33595	}
33596	attrs := map[string]interface{}{}
33597	for _, a := range optional {
33598		a(attrs)
33599	}
33600	opspec := tf.OpSpec{
33601		Type: "MutexV2",
33602
33603		Attrs: attrs,
33604	}
33605	op := scope.AddOperation(opspec)
33606	return op.Output(0)
33607}
33608
33609// Returns the truth value of (x < y) element-wise.
33610//
33611// *NOTE*: `Less` supports broadcasting. More about broadcasting
33612// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
33613//
33614// Example:
33615//
33616// ```python
33617// x = tf.constant([5, 4, 6])
33618// y = tf.constant([5])
33619// tf.math.less(x, y) ==> [False, True, False]
33620//
33621// x = tf.constant([5, 4, 6])
33622// y = tf.constant([5, 6, 7])
33623// tf.math.less(x, y) ==> [False, True, True]
33624// ```
33625func Less(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
33626	if scope.Err() != nil {
33627		return
33628	}
33629	opspec := tf.OpSpec{
33630		Type: "Less",
33631		Input: []tf.Input{
33632			x, y,
33633		},
33634	}
33635	op := scope.AddOperation(opspec)
33636	return op.Output(0)
33637}
33638
33639// MaxPoolGradGradWithArgmaxAttr is an optional argument to MaxPoolGradGradWithArgmax.
33640type MaxPoolGradGradWithArgmaxAttr func(optionalAttr)
33641
33642// MaxPoolGradGradWithArgmaxIncludeBatchInIndex sets the optional include_batch_in_index attribute to value.
33643//
33644// value: Whether to include batch dimension in flattened index of `argmax`.
33645// If not specified, defaults to false
33646func MaxPoolGradGradWithArgmaxIncludeBatchInIndex(value bool) MaxPoolGradGradWithArgmaxAttr {
33647	return func(m optionalAttr) {
33648		m["include_batch_in_index"] = value
33649	}
33650}
33651
33652// Computes second-order gradients of the maxpooling function.
33653//
33654// Arguments:
33655//	input: The original input.
33656//	grad: 4-D with shape `[batch, height, width, channels]`.  Gradients w.r.t. the
33657// input of `max_pool`.
33658//	argmax: The indices of the maximum values chosen for each output of `max_pool`.
33659//	ksize: The size of the window for each dimension of the input tensor.
33660//	strides: The stride of the sliding window for each dimension of the
33661// input tensor.
33662//	padding: The type of padding algorithm to use.
33663//
33664// Returns Gradients of gradients w.r.t. the input of `max_pool`.
33665func MaxPoolGradGradWithArgmax(scope *Scope, input tf.Output, grad tf.Output, argmax tf.Output, ksize []int64, strides []int64, padding string, optional ...MaxPoolGradGradWithArgmaxAttr) (output tf.Output) {
33666	if scope.Err() != nil {
33667		return
33668	}
33669	attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
33670	for _, a := range optional {
33671		a(attrs)
33672	}
33673	opspec := tf.OpSpec{
33674		Type: "MaxPoolGradGradWithArgmax",
33675		Input: []tf.Input{
33676			input, grad, argmax,
33677		},
33678		Attrs: attrs,
33679	}
33680	op := scope.AddOperation(opspec)
33681	return op.Output(0)
33682}
33683
33684// StridedSliceAttr is an optional argument to StridedSlice.
33685type StridedSliceAttr func(optionalAttr)
33686
33687// StridedSliceBeginMask sets the optional begin_mask attribute to value.
33688//
33689// value: a bitmask where a bit i being 1 means to ignore the begin
33690// value and instead use the largest interval possible. At runtime
33691// begin[i] will be replaced with `[0, n-1)` if `stride[i] > 0` or
33692// `[-1, n-1]` if `stride[i] < 0`
33693// If not specified, defaults to 0
33694func StridedSliceBeginMask(value int64) StridedSliceAttr {
33695	return func(m optionalAttr) {
33696		m["begin_mask"] = value
33697	}
33698}
33699
33700// StridedSliceEndMask sets the optional end_mask attribute to value.
33701//
33702// value: analogous to `begin_mask`
33703// If not specified, defaults to 0
33704func StridedSliceEndMask(value int64) StridedSliceAttr {
33705	return func(m optionalAttr) {
33706		m["end_mask"] = value
33707	}
33708}
33709
33710// StridedSliceEllipsisMask sets the optional ellipsis_mask attribute to value.
33711//
33712// value: a bitmask where bit `i` being 1 means the `i`th
33713// position is actually an ellipsis. One bit at most can be 1.
33714// If `ellipsis_mask == 0`, then an implicit ellipsis mask of `1 << (m+1)`
33715// is provided. This means that `foo[3:5] == foo[3:5, ...]`. An ellipsis
33716// implicitly creates as many range specifications as necessary to fully
33717// specify the sliced range for every dimension. For example for a 4-dimensional
33718// tensor `foo` the slice `foo[2, ..., 5:8]` implies `foo[2, :, :, 5:8]`.
33719// If not specified, defaults to 0
33720func StridedSliceEllipsisMask(value int64) StridedSliceAttr {
33721	return func(m optionalAttr) {
33722		m["ellipsis_mask"] = value
33723	}
33724}
33725
33726// StridedSliceNewAxisMask sets the optional new_axis_mask attribute to value.
33727//
33728// value: a bitmask where bit `i` being 1 means the `i`th
33729// specification creates a new shape 1 dimension. For example
33730// `foo[:4, tf.newaxis, :2]` would produce a shape `(4, 1, 2)` tensor.
33731// If not specified, defaults to 0
33732func StridedSliceNewAxisMask(value int64) StridedSliceAttr {
33733	return func(m optionalAttr) {
33734		m["new_axis_mask"] = value
33735	}
33736}
33737
33738// StridedSliceShrinkAxisMask sets the optional shrink_axis_mask attribute to value.
33739//
33740// value: a bitmask where bit `i` implies that the `i`th
33741// specification should shrink the dimensionality. begin and end
33742// must imply a slice of size 1 in the dimension. For example in
33743// python one might do `foo[:, 3, :]` which would result in
33744// `shrink_axis_mask` being 2.
33745// If not specified, defaults to 0
33746func StridedSliceShrinkAxisMask(value int64) StridedSliceAttr {
33747	return func(m optionalAttr) {
33748		m["shrink_axis_mask"] = value
33749	}
33750}
33751
33752// Return a strided slice from `input`.
33753//
33754// Note, most python users will want to use the Python `Tensor.__getitem__`
33755// or `Variable.__getitem__` rather than this op directly.
33756//
33757// The goal of this op is to produce a new tensor with a subset of
33758// the elements from the `n` dimensional `input` tensor. The subset is chosen using
33759// a sequence of `m` sparse range specifications encoded into the arguments
33760// of this function. Note, in some cases
33761// `m` could be equal to `n`, but this need not be the case. Each
33762// range specification entry can be one of the following:
33763//
33764// - An ellipsis (...). Ellipses are used to imply zero or more
33765//   dimensions of full-dimension selection and are produced using
33766//   `ellipsis_mask`. For example, `foo[...]` is the identity slice.
33767//
33768// - A new axis. This is used to insert a new shape=1 dimension and is
33769//   produced using `new_axis_mask`. For example, `foo[tf.newaxis, :, ...]`
33770//   where `foo` is shape `(3, 4)` produces a `(1, 3, 4)` tensor.
33771//
33773// - A range `begin:end:stride`. This is used to specify how much to choose from
33774//   a given dimension. `stride` can be any integer but 0.  `begin` is an integer
33775//   which represents the index of the first value to select while `end` represents
33776//   the index of the last value to select. The number of values selected in each
33777//   dimension is `end - begin` if `stride > 0` and `begin - end` if `stride < 0`.
33778//   `begin` and `end` can be negative where `-1` is the last element, `-2` is
33779//   the second to last. `begin_mask` controls whether to replace the explicitly
33780//   given `begin` with an implicit effective value of `0` if `stride > 0` and
33781//   `-1` if `stride < 0`. `end_mask` is analogous but produces the number
33782//   required to create the largest open interval. For example, given a shape
33783//   `(3,)` tensor `foo[:]`, the effective `begin` and `end` are `0` and `3`. Do
33784//   not assume this is equivalent to `foo[0:-1]` which has an effective `begin`
33785//   and `end` of `0` and `2`. Another example is `foo[-2::-1]`, which reverses
33786//   the first dimension of a tensor while dropping the last element (in the
33787//   original order). For example, `foo = [1,2,3,4]; foo[-2::-1]` is `[3,2,1]`.
33788//
33789// - A single index. This is used to keep only elements that have a given
33790//   index. For example, `foo[2, :]` on a shape `(5,6)` tensor produces a
33791//   shape `(6,)` tensor. This is encoded in `begin` and `end` and
33792//   `shrink_axis_mask`.
33793//
33794// Each conceptual range specification is encoded in the op's arguments. This
33795// encoding is best understood by considering a non-trivial example. In
33796// particular,
33797// `foo[1, 2:4, None, ..., :-3:-1, :]` will be encoded as
33798//
33799// ```
33800// begin = [1, 2, x, x, 0, x] # x denotes don't care (usually 0)
33801// end = [2, 4, x, x, -3, x]
33802// strides = [1, 1, x, x, -1, 1]
33803// begin_mask = 1<<4 | 1<<5 = 48
33804// end_mask = 1<<5 = 32
33805// ellipsis_mask = 1<<3 = 8
33806// new_axis_mask = 1<<2 = 4
33807// shrink_axis_mask = 1<<0 = 1
33808// ```
33809//
33810// In this case if `foo.shape` is (5, 5, 5, 5, 5, 5) the final shape of
33811// the slice becomes (2, 1, 5, 5, 2, 5).
33812// Let us walk step by step through each argument specification.
33813//
33814// 1.  The first argument in the example slice is turned into `begin = 1` and
33815// `end = begin + 1 = 2`. To disambiguate from the original spec `2:4` we
33816// also set the appropriate bit in `shrink_axis_mask`.
33817//
33818// 2. `2:4` contributes 2, 4, 1 to begin, end, and stride. All masks have
33819// zero bits contributed.
33820//
33821// 3. None is a synonym for `tf.newaxis`. This means insert a dimension of
33822// size 1 in the final shape. Dummy values are contributed to begin,
33823// end and stride, while the new_axis_mask bit is set.
33824//
33825// 4. `...` grabs the full ranges from as many dimensions as needed to
33826// fully specify a slice for every dimension of the input shape.
33827//
33828// 5. `:-3:-1` shows the use of negative indices. A negative index `i` associated
33829// with a dimension that has shape `s` is converted to a positive index
33830// `s + i`. So `-1` becomes `s-1` (i.e. the last element). This conversion
33831// is done internally so begin, end and strides receive x, -3, and -1.
33832// The appropriate begin_mask bit is set to indicate the start range is the
33833// full range (ignoring the x).
33834//
33835// 6. `:` indicates that the entire contents of the corresponding dimension
33836// is selected. This is equivalent to `::` or `0::1`. begin, end, and strides
33837// receive 0, 0, and 1, respectively. The appropriate bits in `begin_mask` and
33838// `end_mask` are also set.
33839//
33840// *Requirements*:
33841//   `0 != strides[i] for i in [0, m)`
33842//   `ellipsis_mask must be a power of two (only one ellipsis)`
33843//
33844// Arguments:
33845//
33846//	begin: `begin[k]` specifies the offset into the `k`th range specification.
33847// The exact dimension this corresponds to will be determined by context.
33848// Out-of-bounds values will be silently clamped. If the `k`th bit of
33849// `begin_mask` is set, then `begin[k]` is ignored and the full range of the
33850// appropriate dimension is used instead. Negative values cause indexing
33851// to start from the highest element, e.g. if `foo==[1,2,3]` then `foo[-1]==3`.
33852//	end: `end[i]` is like `begin` with the exception that `end_mask` is
33853// used to determine full ranges.
33854//	strides: `strides[i]` specifies the increment in the `i`th specification
33855// after extracting a given element. Negative indices will reverse
33856// the original order. Out-of-range values are
33857// clamped to `[0,dim[i])` if `strides[i] > 0` or `[-1,dim[i]-1]` if `strides[i] < 0`.
33858func StridedSlice(scope *Scope, input tf.Output, begin tf.Output, end tf.Output, strides tf.Output, optional ...StridedSliceAttr) (output tf.Output) {
33859	if scope.Err() != nil {
33860		return
33861	}
33862	attrs := map[string]interface{}{}
33863	for _, a := range optional {
33864		a(attrs)
33865	}
33866	opspec := tf.OpSpec{
33867		Type: "StridedSlice",
33868		Input: []tf.Input{
33869			input, begin, end, strides,
33870		},
33871		Attrs: attrs,
33872	}
33873	op := scope.AddOperation(opspec)
33874	return op.Output(0)
33875}
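
// A sketch of expressing the worked example above,
// `foo[1, 2:4, None, ..., :-3:-1, :]`, through this wrapper. The mask values
// mirror the encoding walkthrough in the comment; `foo` stands in for any 6-D
// input `tf.Output`, and the don't-care ("x") slots are filled with 0:
//
// ```go
// s := op.NewScope()
// begin := op.Const(s, []int32{1, 2, 0, 0, 0, 0})
// end := op.Const(s, []int32{2, 4, 0, 0, -3, 0})
// strides := op.Const(s, []int32{1, 1, 1, 1, -1, 1})
// out := op.StridedSlice(s, foo, begin, end, strides,
// 	op.StridedSliceBeginMask(48),     // 1<<4 | 1<<5
// 	op.StridedSliceEndMask(32),       // 1<<5
// 	op.StridedSliceEllipsisMask(8),   // 1<<3
// 	op.StridedSliceNewAxisMask(4),    // 1<<2
// 	op.StridedSliceShrinkAxisMask(1), // 1<<0
// )
// // For a (5, 5, 5, 5, 5, 5) input, out has shape (2, 1, 5, 5, 2, 5).
// ```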
33876
33877// RetrieveTPUEmbeddingRMSPropParametersGradAccumDebugAttr is an optional argument to RetrieveTPUEmbeddingRMSPropParametersGradAccumDebug.
33878type RetrieveTPUEmbeddingRMSPropParametersGradAccumDebugAttr func(optionalAttr)
33879
33880// RetrieveTPUEmbeddingRMSPropParametersGradAccumDebugTableId sets the optional table_id attribute to value.
33881// If not specified, defaults to -1
33882func RetrieveTPUEmbeddingRMSPropParametersGradAccumDebugTableId(value int64) RetrieveTPUEmbeddingRMSPropParametersGradAccumDebugAttr {
33883	return func(m optionalAttr) {
33884		m["table_id"] = value
33885	}
33886}
33887
33888// RetrieveTPUEmbeddingRMSPropParametersGradAccumDebugTableName sets the optional table_name attribute to value.
33889// If not specified, defaults to ""
33890func RetrieveTPUEmbeddingRMSPropParametersGradAccumDebugTableName(value string) RetrieveTPUEmbeddingRMSPropParametersGradAccumDebugAttr {
33891	return func(m optionalAttr) {
33892		m["table_name"] = value
33893	}
33894}
33895
33896// RetrieveTPUEmbeddingRMSPropParametersGradAccumDebugConfig sets the optional config attribute to value.
33897// If not specified, defaults to ""
33898func RetrieveTPUEmbeddingRMSPropParametersGradAccumDebugConfig(value string) RetrieveTPUEmbeddingRMSPropParametersGradAccumDebugAttr {
33899	return func(m optionalAttr) {
33900		m["config"] = value
33901	}
33902}
33903
33904// Retrieve RMSProp embedding parameters with debug support.
33905//
33906// An op that retrieves optimization parameters from embedding to host
33907// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
33908// the correct embedding table configuration. For example, this op is
33909// used to retrieve updated parameters before saving a checkpoint.
33910//
33911// Returns:
33912//	parameters: Parameter parameters updated by the RMSProp optimization algorithm.
33913//	ms: Parameter ms updated by the RMSProp optimization algorithm.
33914//	mom: Parameter mom updated by the RMSProp optimization algorithm.
33915//	gradient_accumulators: Parameter gradient_accumulators updated by the RMSProp optimization algorithm.
33916func RetrieveTPUEmbeddingRMSPropParametersGradAccumDebug(scope *Scope, num_shards int64, shard_id int64, optional ...RetrieveTPUEmbeddingRMSPropParametersGradAccumDebugAttr) (parameters tf.Output, ms tf.Output, mom tf.Output, gradient_accumulators tf.Output) {
33917	if scope.Err() != nil {
33918		return
33919	}
33920	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
33921	for _, a := range optional {
33922		a(attrs)
33923	}
33924	opspec := tf.OpSpec{
33925		Type: "RetrieveTPUEmbeddingRMSPropParametersGradAccumDebug",
33926
33927		Attrs: attrs,
33928	}
33929	op := scope.AddOperation(opspec)
33930	return op.Output(0), op.Output(1), op.Output(2), op.Output(3)
33931}
33932
33933// DenseToDenseSetOperationAttr is an optional argument to DenseToDenseSetOperation.
33934type DenseToDenseSetOperationAttr func(optionalAttr)
33935
33936// DenseToDenseSetOperationValidateIndices sets the optional validate_indices attribute to value.
33937// If not specified, defaults to true
33938func DenseToDenseSetOperationValidateIndices(value bool) DenseToDenseSetOperationAttr {
33939	return func(m optionalAttr) {
33940		m["validate_indices"] = value
33941	}
33942}
33943
33944// Applies set operation along last dimension of 2 `Tensor` inputs.
33945//
33946// See SetOperationOp::SetOperationFromContext for values of `set_operation`.
33947//
33948// Output `result` is a `SparseTensor` represented by `result_indices`,
33949// `result_values`, and `result_shape`. For `set1` and `set2` ranked `n`, this
33950// has rank `n` and the same 1st `n-1` dimensions as `set1` and `set2`. The `nth`
33951// dimension contains the result of `set_operation` applied to the corresponding
33952// `[0...n-1]` dimension of `set`.
33953//
33954// Arguments:
33955//	set1: `Tensor` with rank `n`. 1st `n-1` dimensions must be the same as `set2`.
33956// Dimension `n` contains values in a set, duplicates are allowed but ignored.
33957//	set2: `Tensor` with rank `n`. 1st `n-1` dimensions must be the same as `set1`.
33958// Dimension `n` contains values in a set, duplicates are allowed but ignored.
33959//
33960//
33961// Returns:
33962//	result_indices: 2D indices of a `SparseTensor`.
33963//	result_values: 1D values of a `SparseTensor`.
33964//	result_shape: 1D `Tensor` shape of a `SparseTensor`. `result_shape[0...n-1]` is
33965// the same as the 1st `n-1` dimensions of `set1` and `set2`, `result_shape[n]`
33966// is the max result set size across all `0...n-1` dimensions.
33967func DenseToDenseSetOperation(scope *Scope, set1 tf.Output, set2 tf.Output, set_operation string, optional ...DenseToDenseSetOperationAttr) (result_indices tf.Output, result_values tf.Output, result_shape tf.Output) {
33968	if scope.Err() != nil {
33969		return
33970	}
33971	attrs := map[string]interface{}{"set_operation": set_operation}
33972	for _, a := range optional {
33973		a(attrs)
33974	}
33975	opspec := tf.OpSpec{
33976		Type: "DenseToDenseSetOperation",
33977		Input: []tf.Input{
33978			set1, set2,
33979		},
33980		Attrs: attrs,
33981	}
33982	op := scope.AddOperation(opspec)
33983	return op.Output(0), op.Output(1), op.Output(2)
33984}
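
// A sketch of a row-wise set intersection with this wrapper. The accepted
// `set_operation` strings are defined by the C++ kernel; "intersection",
// "union", "a-b", and "b-a" are the values the Python `tf.sets` API uses:
//
// ```go
// s := op.NewScope()
// set1 := op.Const(s, [][]int32{{1, 2, 3, 4}})
// set2 := op.Const(s, [][]int32{{3, 4, 5, 6}})
// idx, vals, shape := op.DenseToDenseSetOperation(s, set1, set2, "intersection")
// // After running the graph: vals => [3 4], shape => [1 2].
// _, _, _ = idx, vals, shape
// ```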
33985
33986// Returns element-wise remainder of division. When `x < 0` xor `y < 0` is
33987//
33988// true, this follows Python semantics in that the result here is consistent
33989// with a flooring divide. E.g. `floor(x / y) * y + mod(x, y) = x`.
33990//
33991// *NOTE*: `FloorMod` supports broadcasting. More about broadcasting
33992// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
33993func FloorMod(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
33994	if scope.Err() != nil {
33995		return
33996	}
33997	opspec := tf.OpSpec{
33998		Type: "FloorMod",
33999		Input: []tf.Input{
34000			x, y,
34001		},
34002	}
34003	op := scope.AddOperation(opspec)
34004	return op.Output(0)
34005}
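
// A small sketch illustrating the flooring-divide identity above: for x = -7
// and y = 3, floor(-7/3) = -3 and -3*3 + 2 = -7, so the remainder is 2 rather
// than the truncating -1 (run with a session as in the Less example earlier):
//
// ```go
// s := op.NewScope()
// x := op.Const(s, []int32{7, -7})
// y := op.Const(s, []int32{3, 3})
// z := op.FloorMod(s, x, y)
// // After running the graph: z => [1 2].
// _ = z
// ```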
34006
34007// Slice a `SparseTensor` based on the `start` and `size`.
34008//
34009// For example, if the input is
34010//
34011//     input_tensor = shape = [2, 7]
34012//     [    a   d e  ]
34013//     [b c          ]
34014//
34015// Graphically the output tensors are:
34016//
34017//     sparse_slice([0, 0], [2, 4]) = shape = [2, 4]
34018//     [    a  ]
34019//     [b c    ]
34020//
34021//     sparse_slice([0, 4], [2, 3]) = shape = [2, 3]
34022//     [ d e  ]
34023//     [      ]
34024//
34025// Arguments:
34026//	indices: 2-D tensor representing the indices of the sparse tensor.
34027//	values: 1-D tensor representing the values of the sparse tensor.
34028//	shape: 1-D tensor representing the shape of the sparse tensor.
34029//	start: 1-D tensor representing the start of the slice.
34030//	size: 1-D tensor representing the size of the slice.
34031//
34032// Returns:
34033//	output_indices: A list of 1-D tensors representing the indices of the output
34034// sparse tensors.
34035//	output_values: A list of 1-D tensors representing the values of the output
34036// sparse tensors.
34037//	output_shape: A list of 1-D tensors representing the shape of the output
34038// sparse tensors.
34040func SparseSlice(scope *Scope, indices tf.Output, values tf.Output, shape tf.Output, start tf.Output, size tf.Output) (output_indices tf.Output, output_values tf.Output, output_shape tf.Output) {
34041	if scope.Err() != nil {
34042		return
34043	}
34044	opspec := tf.OpSpec{
34045		Type: "SparseSlice",
34046		Input: []tf.Input{
34047			indices, values, shape, start, size,
34048		},
34049	}
34050	op := scope.AddOperation(opspec)
34051	return op.Output(0), op.Output(1), op.Output(2)
34052}
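
// A sketch of the second slice from the example above (start [0, 4], size
// [2, 3]), with the letters stored as string values at hypothetical column
// positions consistent with the diagram:
//
// ```go
// s := op.NewScope()
// indices := op.Const(s, [][]int64{{0, 2}, {0, 4}, {0, 5}, {1, 0}, {1, 1}})
// values := op.Const(s, []string{"a", "d", "e", "b", "c"})
// shape := op.Const(s, []int64{2, 7})
// start := op.Const(s, []int64{0, 4})
// size := op.Const(s, []int64{2, 3})
// oi, ov, os := op.SparseSlice(s, indices, values, shape, start, size)
// // After running the graph: ov => ["d" "e"], os => [2 3].
// _, _, _ = oi, ov, os
// ```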
34053
34054// SparseMatrixSparseMatMulAttr is an optional argument to SparseMatrixSparseMatMul.
34055type SparseMatrixSparseMatMulAttr func(optionalAttr)
34056
34057// SparseMatrixSparseMatMulTransposeA sets the optional transpose_a attribute to value.
34058//
34059// value: Indicates whether `a` should be transposed.
34060// If not specified, defaults to false
34061func SparseMatrixSparseMatMulTransposeA(value bool) SparseMatrixSparseMatMulAttr {
34062	return func(m optionalAttr) {
34063		m["transpose_a"] = value
34064	}
34065}
34066
34067// SparseMatrixSparseMatMulTransposeB sets the optional transpose_b attribute to value.
34068//
34069// value: Indicates whether `b` should be transposed.
34070// If not specified, defaults to false
34071func SparseMatrixSparseMatMulTransposeB(value bool) SparseMatrixSparseMatMulAttr {
34072	return func(m optionalAttr) {
34073		m["transpose_b"] = value
34074	}
34075}
34076
34077// SparseMatrixSparseMatMulAdjointA sets the optional adjoint_a attribute to value.
34078//
34079// value: Indicates whether `a` should be conjugate-transposed.
34080// If not specified, defaults to false
34081func SparseMatrixSparseMatMulAdjointA(value bool) SparseMatrixSparseMatMulAttr {
34082	return func(m optionalAttr) {
34083		m["adjoint_a"] = value
34084	}
34085}
34086
34087// SparseMatrixSparseMatMulAdjointB sets the optional adjoint_b attribute to value.
34088//
34089// value: Indicates whether `b` should be conjugate-transposed.
34090// If not specified, defaults to false
34091func SparseMatrixSparseMatMulAdjointB(value bool) SparseMatrixSparseMatMulAttr {
34092	return func(m optionalAttr) {
34093		m["adjoint_b"] = value
34094	}
34095}
34096
34097// Sparse-matrix-multiplies two CSR matrices `a` and `b`.
34098//
34099// Performs a matrix multiplication of a sparse matrix `a` with a sparse matrix
34100// `b`; returns a sparse matrix `a * b`, unless either `a` or `b` is transposed or
34101// adjointed.
34102//
34103// Each matrix may be transposed or adjointed (conjugated and transposed)
34104// according to the Boolean parameters `transpose_a`, `adjoint_a`, `transpose_b`
34105// and `adjoint_b`. At most one of `transpose_a` or `adjoint_a` may be True.
34106// Similarly, at most one of `transpose_b` or `adjoint_b` may be True.
34107//
34108// The inputs must have compatible shapes. That is, the inner dimension of `a`
34109// must be equal to the outer dimension of `b`. This requirement is adjusted
34110// according to whether either `a` or `b` is transposed or adjointed.
34111//
34112// The `type` parameter denotes the type of the matrix elements. Both `a` and `b`
34113// must have the same type. The supported types are: `float32`, `float64`,
34114// `complex64` and `complex128`.
34115//
34116// Both `a` and `b` must have the same rank. Broadcasting is not supported. If they
34117// have rank 3, each batch of 2D CSRSparseMatrices within `a` and `b` must have the
34118// same dense shape.
34119//
34120// The sparse matrix product may have numeric (non-structural) zeros.
34121// TODO(anudhyan): Consider adding a boolean attribute to control whether to prune
34122// zeros.
34123//
34124// Usage example:
34125//
34126// ```python
34127//     from tensorflow.python.ops.linalg.sparse import sparse_csr_matrix_ops
34128//
34129//     a_indices = np.array([[0, 0], [2, 3], [2, 4], [3, 0]])
34130//     a_values = np.array([1.0, 5.0, -1.0, -2.0], np.float32)
34131//     a_dense_shape = [4, 5]
34132//
34133//     b_indices = np.array([[0, 0], [3, 0], [3, 1]])
34134//     b_values = np.array([2.0, 7.0, 8.0], np.float32)
34135//     b_dense_shape = [5, 3]
34136//
34137//     with tf.Session() as sess:
34138//       # Define (COO format) Sparse Tensors over Numpy arrays
34139//       a_st = tf.sparse.SparseTensor(a_indices, a_values, a_dense_shape)
34140//       b_st = tf.sparse.SparseTensor(b_indices, b_values, b_dense_shape)
34141//
34142//       # Convert SparseTensors to CSR SparseMatrix
34143//       a_sm = sparse_csr_matrix_ops.sparse_tensor_to_csr_sparse_matrix(
34144//           a_st.indices, a_st.values, a_st.dense_shape)
34145//       b_sm = sparse_csr_matrix_ops.sparse_tensor_to_csr_sparse_matrix(
34146//           b_st.indices, b_st.values, b_st.dense_shape)
34147//
34148//       # Compute the CSR SparseMatrix matrix multiplication
34149//       c_sm = sparse_csr_matrix_ops.sparse_matrix_sparse_mat_mul(
34150//           a=a_sm, b=b_sm, type=tf.float32)
34151//
34152//       # Convert the CSR SparseMatrix product to a dense Tensor
34153//       c_sm_dense = sparse_csr_matrix_ops.csr_sparse_matrix_to_dense(
34154//           c_sm, tf.float32)
34155//       # Evaluate the dense Tensor value
34156//       c_sm_dense_value = sess.run(c_sm_dense)
34157// ```
34158//
34159// `c_sm_dense_value` stores the dense matrix product:
34160//
34161// ```
34162//     [[  2.   0.   0.]
34163//      [  0.   0.   0.]
34164//      [ 35.  40.   0.]
34165//      [ -4.   0.   0.]]
34166// ```
34167//
34168// a: A `CSRSparseMatrix`.
34169// b: A `CSRSparseMatrix` with the same type and rank as `a`.
34170// type: The type of both `a` and `b`.
34171// transpose_a: If True, `a` is transposed before multiplication.
34172// transpose_b: If True, `b` is transposed before multiplication.
34173// adjoint_a: If True, `a` is adjointed before multiplication.
34174// adjoint_b: If True, `b` is adjointed before multiplication.
34175//
34176// Arguments:
34177//	a: A CSRSparseMatrix.
34178//	b: A CSRSparseMatrix.
34179//
34180//
34181// Returns A CSRSparseMatrix.
34182func SparseMatrixSparseMatMul(scope *Scope, a tf.Output, b tf.Output, type_ tf.DataType, optional ...SparseMatrixSparseMatMulAttr) (c tf.Output) {
34183	if scope.Err() != nil {
34184		return
34185	}
34186	attrs := map[string]interface{}{"type": type_}
34187	for _, a := range optional {
34188		a(attrs)
34189	}
34190	opspec := tf.OpSpec{
34191		Type: "SparseMatrixSparseMatMul",
34192		Input: []tf.Input{
34193			a, b,
34194		},
34195		Attrs: attrs,
34196	}
34197	op := scope.AddOperation(opspec)
34198	return op.Output(0)
34199}
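
// A Go sketch of the Python usage example above. It assumes the generated
// `SparseTensorToCSRSparseMatrix` wrapper from elsewhere in this package, and
// uses the `CSRSparseMatrixToDense` wrapper documented later in this file:
//
// ```go
// s := op.NewScope()
// aSM := op.SparseTensorToCSRSparseMatrix(s.SubScope("a"),
// 	op.Const(s, [][]int64{{0, 0}, {2, 3}, {2, 4}, {3, 0}}),
// 	op.Const(s, []float32{1, 5, -1, -2}),
// 	op.Const(s, []int64{4, 5}))
// bSM := op.SparseTensorToCSRSparseMatrix(s.SubScope("b"),
// 	op.Const(s, [][]int64{{0, 0}, {3, 0}, {3, 1}}),
// 	op.Const(s, []float32{2, 7, 8}),
// 	op.Const(s, []int64{5, 3}))
// cSM := op.SparseMatrixSparseMatMul(s, aSM, bSM, tf.Float)
// dense := op.CSRSparseMatrixToDense(s, cSM, tf.Float)
// // After running the graph, dense holds the 4x3 product shown above.
// _ = dense
// ```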
34200
34201// CopyHostAttr is an optional argument to CopyHost.
34202type CopyHostAttr func(optionalAttr)
34203
34204// CopyHostTensorName sets the optional tensor_name attribute to value.
34205//
34206// value: The name of the input tensor.
34207// If not specified, defaults to ""
34208func CopyHostTensorName(value string) CopyHostAttr {
34209	return func(m optionalAttr) {
34210		m["tensor_name"] = value
34211	}
34212}
34213
34214// CopyHostDebugOpsSpec sets the optional debug_ops_spec attribute to value.
34215//
34216// value: A list of debug op spec (op, url, gated_grpc) for attached debug
34217// ops. Each element of the list has the format
34218// <debug_op>;<grpc_url>;<gated_grpc>, wherein gated_grpc is boolean represented
34219// as 0/1. E.g., "DebugIdentity;grpc://foo:3333;1",
34220// "DebugIdentity;file:///tmp/tfdbg_1;0".
34221// If not specified, defaults to <>
34222func CopyHostDebugOpsSpec(value []string) CopyHostAttr {
34223	return func(m optionalAttr) {
34224		m["debug_ops_spec"] = value
34225	}
34226}
34227
34228// Copy a tensor to host.
34229//
34230// Performs CPU-to-CPU deep-copying of a tensor.
34231// N.B.: If all the downstream attached debug ops are disabled given the current
34232// gRPC gating status, the output will simply forward the input tensor without
34233// deep-copying. See the documentation of Debug* ops for more details.
34234//
34235// Unlike the Copy Op, this op has a HostMemory constraint on its input or output.
34236//
34237// Arguments:
34238//	input: Input tensor.
34239func CopyHost(scope *Scope, input tf.Output, optional ...CopyHostAttr) (output tf.Output) {
34240	if scope.Err() != nil {
34241		return
34242	}
34243	attrs := map[string]interface{}{}
34244	for _, a := range optional {
34245		a(attrs)
34246	}
34247	opspec := tf.OpSpec{
34248		Type: "CopyHost",
34249		Input: []tf.Input{
34250			input,
34251		},
34252		Attrs: attrs,
34253	}
34254	op := scope.AddOperation(opspec)
34255	return op.Output(0)
34256}
34257
34258// Sparse addition of two CSR matrices, C = alpha * A + beta * B.
34259//
34260// The gradients of SparseMatrixAdd outputs with respect to alpha and beta are not
34261// currently defined (TensorFlow will return zeros for these entries).
34262//
34263// Arguments:
34264//	a: A CSRSparseMatrix.
34265//	b: A CSRSparseMatrix.
34266//	alpha: A constant scalar.
34267//	beta: A constant scalar.
34268//
34269// Returns A CSRSparseMatrix.
34270func SparseMatrixAdd(scope *Scope, a tf.Output, b tf.Output, alpha tf.Output, beta tf.Output) (c tf.Output) {
34271	if scope.Err() != nil {
34272		return
34273	}
34274	opspec := tf.OpSpec{
34275		Type: "SparseMatrixAdd",
34276		Input: []tf.Input{
34277			a, b, alpha, beta,
34278		},
34279	}
34280	op := scope.AddOperation(opspec)
34281	return op.Output(0)
34282}
34283
34284// SparseMatrixMatMulAttr is an optional argument to SparseMatrixMatMul.
34285type SparseMatrixMatMulAttr func(optionalAttr)
34286
34287// SparseMatrixMatMulTransposeA sets the optional transpose_a attribute to value.
34288//
34289// value: Indicates whether `a` should be transposed.
34290// If not specified, defaults to false
34291func SparseMatrixMatMulTransposeA(value bool) SparseMatrixMatMulAttr {
34292	return func(m optionalAttr) {
34293		m["transpose_a"] = value
34294	}
34295}
34296
34297// SparseMatrixMatMulTransposeB sets the optional transpose_b attribute to value.
34298//
34299// value: Indicates whether `b` should be transposed.
34300// If not specified, defaults to false
34301func SparseMatrixMatMulTransposeB(value bool) SparseMatrixMatMulAttr {
34302	return func(m optionalAttr) {
34303		m["transpose_b"] = value
34304	}
34305}
34306
34307// SparseMatrixMatMulAdjointA sets the optional adjoint_a attribute to value.
34308//
34309// value: Indicates whether `a` should be conjugate-transposed.
34310// If not specified, defaults to false
34311func SparseMatrixMatMulAdjointA(value bool) SparseMatrixMatMulAttr {
34312	return func(m optionalAttr) {
34313		m["adjoint_a"] = value
34314	}
34315}
34316
34317// SparseMatrixMatMulAdjointB sets the optional adjoint_b attribute to value.
34318//
34319// value: Indicates whether `b` should be conjugate-transposed.
34320// If not specified, defaults to false
34321func SparseMatrixMatMulAdjointB(value bool) SparseMatrixMatMulAttr {
34322	return func(m optionalAttr) {
34323		m["adjoint_b"] = value
34324	}
34325}
34326
34327// SparseMatrixMatMulTransposeOutput sets the optional transpose_output attribute to value.
34328//
34329// value: Transposes the product of `a` and `b`.
34330// If not specified, defaults to false
34331func SparseMatrixMatMulTransposeOutput(value bool) SparseMatrixMatMulAttr {
34332	return func(m optionalAttr) {
34333		m["transpose_output"] = value
34334	}
34335}
34336
34337// SparseMatrixMatMulConjugateOutput sets the optional conjugate_output attribute to value.
34338//
34339// value: Conjugates the product of `a` and `b`.
34340// If not specified, defaults to false
34341func SparseMatrixMatMulConjugateOutput(value bool) SparseMatrixMatMulAttr {
34342	return func(m optionalAttr) {
34343		m["conjugate_output"] = value
34344	}
34345}
34346
34347// Matrix-multiplies a sparse matrix with a dense matrix.
34348//
34349// Returns a dense matrix.
34350// For inputs A and B, where A is CSR and B is dense, this op returns a dense C.
34351//
34352// If transpose_output is false, returns:
34353// ```
34354//   C = A . B
34355// ```
34356//
34357// If transpose_output is `true`, returns:
34358// ```
34359//   C = transpose(A . B) = transpose(B) . transpose(A)
34360// ```
34361// where the transposition is performed along the two innermost (matrix)
34362// dimensions.
34363//
34364// If conjugate_output is `true`, returns:
34365// ```
34366//   C = conjugate(A . B) = conjugate(A) . conjugate(B)
34367// ```
34368//
34369// If both conjugate_output and transpose_output are `true`, returns:
34370// ```
34371//   C = conjugate(transpose(A . B)) = conjugate(transpose(B)) .
34372//                                     conjugate(transpose(A))
34373// ```
34374//
34375// Arguments:
34376//	a: A CSRSparseMatrix.
34377//	b: A dense tensor.
34378//
34379// Returns A dense output tensor.
34380func SparseMatrixMatMul(scope *Scope, a tf.Output, b tf.Output, optional ...SparseMatrixMatMulAttr) (output tf.Output) {
34381	if scope.Err() != nil {
34382		return
34383	}
34384	attrs := map[string]interface{}{}
34385	for _, a := range optional {
34386		a(attrs)
34387	}
34388	opspec := tf.OpSpec{
34389		Type: "SparseMatrixMatMul",
34390		Input: []tf.Input{
34391			a, b,
34392		},
34393		Attrs: attrs,
34394	}
34395	op := scope.AddOperation(opspec)
34396	return op.Output(0)
34397}
34398
34399// Gather ragged slices from `params` axis `0` according to `indices`.
34400//
34401// Outputs a `RaggedTensor` output composed from `output_dense_values` and
34402// `output_nested_splits`, such that:
34403//
34404// ```python
34405// output.shape = indices.shape + params.shape[1:]
34406// output.ragged_rank = indices.shape.ndims + params.ragged_rank
34407// output[i...j, d0...dn] = params[indices[i...j], d0...dn]
34408// ```
34409//
34410// where
34411//
34412// * `params =
34413//    ragged.from_nested_row_splits(params_dense_values, params_nested_splits)`
34414//    provides the values that should be gathered.
34415// * `indices` is a dense tensor with dtype `int32` or `int64`, indicating which
34416//    values should be gathered.
34417// * `output =
34418//    ragged.from_nested_row_splits(output_dense_values, output_nested_splits)`
34419//    is the output tensor.
34420//
34421// (Note: This c++ op is used to implement the higher-level python
34422// `tf.ragged.gather` op, which also supports ragged indices.)
34423//
34425// Arguments:
34426//	params_nested_splits: The `nested_row_splits` tensors that define the row-partitioning for the
34427// `params` RaggedTensor input.
34428//	params_dense_values: The `flat_values` for the `params` RaggedTensor. There was a terminology change
34429// at the python level from dense_values to flat_values, so dense_values is the
34430// deprecated name.
34431//	indices: Indices in the outermost dimension of `params` of the values that should be
34432// gathered.
34433//	OUTPUT_RAGGED_RANK: The ragged rank of the output RaggedTensor. `output_nested_splits` will contain
34434// this number of `row_splits` tensors. This value should equal
34435// `indices.shape.ndims + params.ragged_rank - 1`.
34436//
34437// Returns:
34438//	output_nested_splits: The `nested_row_splits` tensors that define the row-partitioning for the
34439// returned RaggedTensor.
34440//	output_dense_values: The `flat_values` for the returned RaggedTensor.
34441func RaggedGather(scope *Scope, params_nested_splits []tf.Output, params_dense_values tf.Output, indices tf.Output, OUTPUT_RAGGED_RANK int64) (output_nested_splits []tf.Output, output_dense_values tf.Output) {
34442	if scope.Err() != nil {
34443		return
34444	}
34445	attrs := map[string]interface{}{"OUTPUT_RAGGED_RANK": OUTPUT_RAGGED_RANK}
34446	opspec := tf.OpSpec{
34447		Type: "RaggedGather",
34448		Input: []tf.Input{
34449			tf.OutputList(params_nested_splits), params_dense_values, indices,
34450		},
34451		Attrs: attrs,
34452	}
34453	op := scope.AddOperation(opspec)
34454	if scope.Err() != nil {
34455		return
34456	}
34457	var idx int
34458	var err error
34459	if output_nested_splits, idx, err = makeOutputList(op, idx, "output_nested_splits"); err != nil {
34460		scope.UpdateErr("RaggedGather", err)
34461		return
34462	}
34463	output_dense_values = op.Output(idx)
34464	return output_nested_splits, output_dense_values
34465}
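
// A sketch of gathering rows of a ragged tensor [[1, 2], [], [3]] (row splits
// [0, 2, 2, 3]) at indices [2, 0]. Here OUTPUT_RAGGED_RANK is
// indices.shape.ndims + params.ragged_rank - 1 = 1:
//
// ```go
// s := op.NewScope()
// splits := op.Const(s, []int64{0, 2, 2, 3})
// values := op.Const(s, []int32{1, 2, 3})
// indices := op.Const(s, []int32{2, 0})
// outSplits, outValues := op.RaggedGather(s, []tf.Output{splits}, values, indices, 1)
// // After running the graph: outValues => [3 1 2], outSplits[0] => [0 1 3].
// _, _ = outSplits, outValues
// ```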
34466
34467// Elementwise computes the bitwise OR of `x` and `y`.
34468//
34469// The result will have those bits set, that are set in `x`, `y` or both. The
34470// computation is performed on the underlying representations of `x` and `y`.
34471//
34472// For example:
34473//
34474// ```python
34475// import tensorflow as tf
34476// from tensorflow.python.ops import bitwise_ops
34477// dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64,
34478//               tf.uint8, tf.uint16, tf.uint32, tf.uint64]
34479//
34480// for dtype in dtype_list:
34481//   lhs = tf.constant([0, 5, 3, 14], dtype=dtype)
34482//   rhs = tf.constant([5, 0, 7, 11], dtype=dtype)
34483//   exp = tf.constant([5, 5, 7, 15], dtype=tf.float32)
34484//
34485//   res = bitwise_ops.bitwise_or(lhs, rhs)
34486//   tf.assert_equal(tf.cast(res,  tf.float32), exp)  # TRUE
34487// ```
34488//
34489func BitwiseOr(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
34490	if scope.Err() != nil {
34491		return
34492	}
34493	opspec := tf.OpSpec{
34494		Type: "BitwiseOr",
34495		Input: []tf.Input{
34496			x, y,
34497		},
34498	}
34499	op := scope.AddOperation(opspec)
34500	return op.Output(0)
34501}
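
// The same computation as the Python example above, sketched through this
// wrapper for a single dtype:
//
// ```go
// s := op.NewScope()
// lhs := op.Const(s, []int32{0, 5, 3, 14})
// rhs := op.Const(s, []int32{5, 0, 7, 11})
// z := op.BitwiseOr(s, lhs, rhs)
// // After running the graph: z => [5 5 7 15].
// _ = z
// ```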
34502
34503// BatchMatMulV2Attr is an optional argument to BatchMatMulV2.
34504type BatchMatMulV2Attr func(optionalAttr)
34505
34506// BatchMatMulV2AdjX sets the optional adj_x attribute to value.
34507//
34508// value: If `True`, adjoint the slices of `x`. Defaults to `False`.
34509// If not specified, defaults to false
34510func BatchMatMulV2AdjX(value bool) BatchMatMulV2Attr {
34511	return func(m optionalAttr) {
34512		m["adj_x"] = value
34513	}
34514}
34515
34516// BatchMatMulV2AdjY sets the optional adj_y attribute to value.
34517//
34518// value: If `True`, adjoint the slices of `y`. Defaults to `False`.
34519// If not specified, defaults to false
34520func BatchMatMulV2AdjY(value bool) BatchMatMulV2Attr {
34521	return func(m optionalAttr) {
34522		m["adj_y"] = value
34523	}
34524}
34525
34526// Multiplies slices of two tensors in batches.
34527//
34528// Multiplies all slices of `Tensor` `x` and `y` (each slice can be
34529// viewed as an element of a batch), and arranges the individual results
34530// in a single output tensor of the same batch size. Each of the
34531// individual slices can optionally be adjointed (to adjoint a matrix
34532// means to transpose and conjugate it) before multiplication by setting
34533// the `adj_x` or `adj_y` flag to `True`, which are by default `False`.
34534//
34535// The input tensors `x` and `y` are 2-D or higher with shape `[..., r_x, c_x]`
34536// and `[..., r_y, c_y]`.
34537//
34538// The output tensor is 2-D or higher with shape `[..., r_o, c_o]`, where:
34539//
34540//     r_o = c_x if adj_x else r_x
34541//     c_o = r_y if adj_y else c_y
34542//
34543// It is computed as:
34544//
34545//     output[..., :, :] = matrix(x[..., :, :]) * matrix(y[..., :, :])
34546//
34547// *NOTE*: `BatchMatMulV2` supports broadcasting in the batch dimensions. More
34548// about broadcasting
34549// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html).
34550//
34551//
34552// Arguments:
34553//	x: 2-D or higher with shape `[..., r_x, c_x]`.
34554//	y: 2-D or higher with shape `[..., r_y, c_y]`.
34555//
34556// Returns 2-D or higher with shape `[..., r_o, c_o]`
34557func BatchMatMulV2(scope *Scope, x tf.Output, y tf.Output, optional ...BatchMatMulV2Attr) (output tf.Output) {
34558	if scope.Err() != nil {
34559		return
34560	}
34561	attrs := map[string]interface{}{}
34562	for _, a := range optional {
34563		a(attrs)
34564	}
34565	opspec := tf.OpSpec{
34566		Type: "BatchMatMulV2",
34567		Input: []tf.Input{
34568			x, y,
34569		},
34570		Attrs: attrs,
34571	}
34572	op := scope.AddOperation(opspec)
34573	return op.Output(0)
34574}
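
// A sketch of a single-slice batch multiply with `adj_x` set; for the real
// inputs below, the adjoint reduces to a plain transpose:
//
// ```go
// s := op.NewScope()
// x := op.Const(s, [][][]float32{{{1, 2}, {3, 4}}}) // shape [1, 2, 2]
// y := op.Const(s, [][][]float32{{{5, 6}, {7, 8}}}) // shape [1, 2, 2]
// out := op.BatchMatMulV2(s, x, y, op.BatchMatMulV2AdjX(true))
// // After running the graph: out => [[[26 30] [38 44]]].
// _ = out
// ```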
34575
34576// SendAttr is an optional argument to Send.
34577type SendAttr func(optionalAttr)
34578
34579// SendClientTerminated sets the optional client_terminated attribute to value.
34580//
34581// value: If set to true, this indicates that the node was added
34582// to the graph as a result of a client-side feed or fetch of Tensor data,
34583// in which case the corresponding send or recv is expected to be managed
34584// locally by the caller.
34585// If not specified, defaults to false
34586func SendClientTerminated(value bool) SendAttr {
34587	return func(m optionalAttr) {
34588		m["client_terminated"] = value
34589	}
34590}
34591
34592// Sends the named tensor from send_device to recv_device.
34593//
34594// Arguments:
34595//	tensor: The tensor to send.
34596//	tensor_name: The name of the tensor to send.
34597//	send_device: The name of the device sending the tensor.
34598//	send_device_incarnation: The current incarnation of send_device.
34599//	recv_device: The name of the device receiving the tensor.
34600//
34601// Returns the created operation.
34602func Send(scope *Scope, tensor tf.Output, tensor_name string, send_device string, send_device_incarnation int64, recv_device string, optional ...SendAttr) (o *tf.Operation) {
34603	if scope.Err() != nil {
34604		return
34605	}
34606	attrs := map[string]interface{}{"tensor_name": tensor_name, "send_device": send_device, "send_device_incarnation": send_device_incarnation, "recv_device": recv_device}
34607	for _, a := range optional {
34608		a(attrs)
34609	}
34610	opspec := tf.OpSpec{
34611		Type: "Send",
34612		Input: []tf.Input{
34613			tensor,
34614		},
34615		Attrs: attrs,
34616	}
34617	return scope.AddOperation(opspec)
34618}
34619
34620// StringSplitV2Attr is an optional argument to StringSplitV2.
34621type StringSplitV2Attr func(optionalAttr)
34622
34623// StringSplitV2Maxsplit sets the optional maxsplit attribute to value.
34624//
34625// value: An `int`. If `maxsplit > 0`, limit of the split of the result.
34626// If not specified, defaults to -1
34627func StringSplitV2Maxsplit(value int64) StringSplitV2Attr {
34628	return func(m optionalAttr) {
34629		m["maxsplit"] = value
34630	}
34631}
34632
34633// Split elements of `source` based on `sep` into a `SparseTensor`.
34634//
34635// Let N be the size of source (typically N will be the batch size). Split each
34636// element of `source` based on `sep` and return a `SparseTensor`
34637// containing the split tokens. Empty tokens are ignored.
34638//
34639// For example, N = 2, source[0] is 'hello world' and source[1] is 'a b c',
34640// then the output will be
34641// ```
34642// st.indices = [0, 0;
34643//               0, 1;
34644//               1, 0;
34645//               1, 1;
34646//               1, 2]
34647// st.shape = [2, 3]
34648// st.values = ['hello', 'world', 'a', 'b', 'c']
34649// ```
34650//
34651// If `sep` is given, consecutive delimiters are not grouped together and are
34652// deemed to delimit empty strings. For example, source of `"1<>2<><>3"` and
34653// sep of `"<>"` returns `["1", "2", "", "3"]`. If `sep` is None or an empty
34654// string, consecutive whitespace is regarded as a single separator, and the
34655// result will contain no empty strings at the start or end if the string has
34656// leading or trailing whitespace.
34657//
34658// Note that the above-mentioned behavior matches python's str.split.
34659//
34660// Arguments:
34661//	input: `1-D` string `Tensor`, the strings to split.
34662//	sep: `0-D` string `Tensor`, the delimiter character.
34663func StringSplitV2(scope *Scope, input tf.Output, sep tf.Output, optional ...StringSplitV2Attr) (indices tf.Output, values tf.Output, shape tf.Output) {
34664	if scope.Err() != nil {
34665		return
34666	}
34667	attrs := map[string]interface{}{}
34668	for _, a := range optional {
34669		a(attrs)
34670	}
34671	opspec := tf.OpSpec{
34672		Type: "StringSplitV2",
34673		Input: []tf.Input{
34674			input, sep,
34675		},
34676		Attrs: attrs,
34677	}
34678	op := scope.AddOperation(opspec)
34679	return op.Output(0), op.Output(1), op.Output(2)
34680}
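
// A sketch of the whitespace-splitting example above; an empty scalar `sep`
// selects the Python-style whitespace behavior:
//
// ```go
// s := op.NewScope()
// input := op.Const(s, []string{"hello world", "a b c"})
// sep := op.Const(s, "")
// indices, values, shape := op.StringSplitV2(s, input, sep)
// // After running the graph: values => ["hello" "world" "a" "b" "c"],
// // shape => [2 3].
// _, _, _ = indices, values, shape
// ```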
34681
34682// Compute the lower regularized incomplete Gamma function `P(a, x)`.
34683//
34684// The lower regularized incomplete Gamma function is defined as:
34685//
34687// \\(P(a, x) = gamma(a, x) / Gamma(a) = 1 - Q(a, x)\\)
34688//
34689// where
34690//
34691// \\(gamma(a, x) = \\int_{0}^{x} t^{a-1} exp(-t) dt\\)
34692//
34693// is the lower incomplete Gamma function.
34694//
34695// Note, above, `Q(a, x)` (`Igammac`) is the upper regularized incomplete
34696// Gamma function.
34697func Igamma(scope *Scope, a tf.Output, x tf.Output) (z tf.Output) {
34698	if scope.Err() != nil {
34699		return
34700	}
34701	opspec := tf.OpSpec{
34702		Type: "Igamma",
34703		Input: []tf.Input{
34704			a, x,
34705		},
34706	}
34707	op := scope.AddOperation(opspec)
34708	return op.Output(0)
34709}
34710
34711// Convert a (possibly batched) CSRSparseMatrix to dense.
34712//
34713// Arguments:
34714//	sparse_input: A batched CSRSparseMatrix.
34715//
34716//
34717// Returns A dense tensor.
34718func CSRSparseMatrixToDense(scope *Scope, sparse_input tf.Output, type_ tf.DataType) (dense_output tf.Output) {
34719	if scope.Err() != nil {
34720		return
34721	}
34722	attrs := map[string]interface{}{"type": type_}
34723	opspec := tf.OpSpec{
34724		Type: "CSRSparseMatrixToDense",
34725		Input: []tf.Input{
34726			sparse_input,
34727		},
34728		Attrs: attrs,
34729	}
34730	op := scope.AddOperation(opspec)
34731	return op.Output(0)
34732}
34733
34734// Adds all input tensors element-wise.
34735//
34736//   Inputs must be of same size and shape.
34737//
34738//   ```python
34739//   x = [9, 7, 10]
34740//   tf.math.add_n(x) ==> 26
34741//   ```
34742func AddN(scope *Scope, inputs []tf.Output) (sum tf.Output) {
34743	if scope.Err() != nil {
34744		return
34745	}
34746	opspec := tf.OpSpec{
34747		Type: "AddN",
34748		Input: []tf.Input{
34749			tf.OutputList(inputs),
34750		},
34751	}
34752	op := scope.AddOperation(opspec)
34753	return op.Output(0)
34754}
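
// A sketch of summing two same-shaped tensors with this wrapper (the Python
// example above instead sums the scalar elements of one list):
//
// ```go
// s := op.NewScope()
// a := op.Const(s, []int32{9, 7, 10})
// b := op.Const(s, []int32{1, 1, 1})
// sum := op.AddN(s, []tf.Output{a, b})
// // After running the graph: sum => [10 8 11].
// _ = sum
// ```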
34755
34756// Converts a (possibly batched) CSRSparseMatrix to a SparseTensor.
34757//
34758// Arguments:
34759//	sparse_matrix: A (possibly batched) CSRSparseMatrix.
34760//
34761//
34762// Returns:
34763//	indices: SparseTensor indices.
34764//	values: SparseTensor values.
34765//	dense_shape: SparseTensor dense shape.
34766func CSRSparseMatrixToSparseTensor(scope *Scope, sparse_matrix tf.Output, type_ tf.DataType) (indices tf.Output, values tf.Output, dense_shape tf.Output) {
34767	if scope.Err() != nil {
34768		return
34769	}
34770	attrs := map[string]interface{}{"type": type_}
34771	opspec := tf.OpSpec{
34772		Type: "CSRSparseMatrixToSparseTensor",
34773		Input: []tf.Input{
34774			sparse_matrix,
34775		},
34776		Attrs: attrs,
34777	}
34778	op := scope.AddOperation(opspec)
34779	return op.Output(0), op.Output(1), op.Output(2)
34780}
34781
34782// OrderedMapStageAttr is an optional argument to OrderedMapStage.
34783type OrderedMapStageAttr func(optionalAttr)
34784
34785// OrderedMapStageCapacity sets the optional capacity attribute to value.
34786//
34787// value: Maximum number of elements in the Staging Area. If > 0, inserts
34788// on the container will block when the capacity is reached.
34789// If not specified, defaults to 0
34790//
34791// REQUIRES: value >= 0
34792func OrderedMapStageCapacity(value int64) OrderedMapStageAttr {
34793	return func(m optionalAttr) {
34794		m["capacity"] = value
34795	}
34796}
34797
34798// OrderedMapStageMemoryLimit sets the optional memory_limit attribute to value.
34799// If not specified, defaults to 0
34800//
34801// REQUIRES: value >= 0
34802func OrderedMapStageMemoryLimit(value int64) OrderedMapStageAttr {
34803	return func(m optionalAttr) {
34804		m["memory_limit"] = value
34805	}
34806}
34807
34808// OrderedMapStageContainer sets the optional container attribute to value.
34809//
34810// value: If non-empty, this queue is placed in the given container. Otherwise,
34811// a default container is used.
34812// If not specified, defaults to ""
34813func OrderedMapStageContainer(value string) OrderedMapStageAttr {
34814	return func(m optionalAttr) {
34815		m["container"] = value
34816	}
34817}
34818
34819// OrderedMapStageSharedName sets the optional shared_name attribute to value.
34820//
34821// value: It is necessary to match this name to the matching Unstage Op.
34822// If not specified, defaults to ""
34823func OrderedMapStageSharedName(value string) OrderedMapStageAttr {
34824	return func(m optionalAttr) {
34825		m["shared_name"] = value
34826	}
34827}
34828
34829// Stage (key, values) in the underlying container which behaves like an ordered
34830//
34831// associative container. Elements are ordered by key.
34832//
34833// Arguments:
34834//	key: int64
34835//
34836//	values: a list of tensors
34837// dtypes: A list of data types that inserted values should adhere to.
34838//
34839//
34840// Returns the created operation.
34841func OrderedMapStage(scope *Scope, key tf.Output, indices tf.Output, values []tf.Output, dtypes []tf.DataType, optional ...OrderedMapStageAttr) (o *tf.Operation) {
34842	if scope.Err() != nil {
34843		return
34844	}
34845	attrs := map[string]interface{}{"dtypes": dtypes}
34846	for _, a := range optional {
34847		a(attrs)
34848	}
34849	opspec := tf.OpSpec{
34850		Type: "OrderedMapStage",
34851		Input: []tf.Input{
34852			key, indices, tf.OutputList(values),
34853		},
34854		Attrs: attrs,
34855	}
34856	return scope.AddOperation(opspec)
34857}
34858
34859// TPUReplicateMetadataAttr is an optional argument to TPUReplicateMetadata.
34860type TPUReplicateMetadataAttr func(optionalAttr)
34861
34862// TPUReplicateMetadataNumCoresPerReplica sets the optional num_cores_per_replica attribute to value.
34863//
34864// value: Number of cores per replica. Used for model parallelism.
34865// If not specified, defaults to 1
34866func TPUReplicateMetadataNumCoresPerReplica(value int64) TPUReplicateMetadataAttr {
34867	return func(m optionalAttr) {
34868		m["num_cores_per_replica"] = value
34869	}
34870}
34871
34872// TPUReplicateMetadataTopology sets the optional topology attribute to value.
34873//
34874// value: TopologyProto indicating the topology of the TPU pod slice.
34875// If not specified, defaults to ""
34876func TPUReplicateMetadataTopology(value string) TPUReplicateMetadataAttr {
34877	return func(m optionalAttr) {
34878		m["topology"] = value
34879	}
34880}
34881
34882// TPUReplicateMetadataUseTpu sets the optional use_tpu attribute to value.
34883//
34884// value: Whether to place the computation on the TPU.
34885// If not specified, defaults to true
34886func TPUReplicateMetadataUseTpu(value bool) TPUReplicateMetadataAttr {
34887	return func(m optionalAttr) {
34888		m["use_tpu"] = value
34889	}
34890}
34891
34892// TPUReplicateMetadataDeviceAssignment sets the optional device_assignment attribute to value.
34893//
34894// value: The assignment of devices for the computation.
34895// If not specified, defaults to <>
34896func TPUReplicateMetadataDeviceAssignment(value []int64) TPUReplicateMetadataAttr {
34897	return func(m optionalAttr) {
34898		m["device_assignment"] = value
34899	}
34900}
34901
34902// TPUReplicateMetadataComputationShape sets the optional computation_shape attribute to value.
34903//
34904// value: DEPRECATED. Use num_cores_per_replica instead.
34905// If not specified, defaults to <>
34906func TPUReplicateMetadataComputationShape(value []int64) TPUReplicateMetadataAttr {
34907	return func(m optionalAttr) {
34908		m["computation_shape"] = value
34909	}
34910}
34911
34912// TPUReplicateMetadataHostComputeCore sets the optional host_compute_core attribute to value.
34913// If not specified, defaults to <>
34914func TPUReplicateMetadataHostComputeCore(value []string) TPUReplicateMetadataAttr {
34915	return func(m optionalAttr) {
34916		m["host_compute_core"] = value
34917	}
34918}
34919
34920// TPUReplicateMetadataPaddingMap sets the optional padding_map attribute to value.
34921// If not specified, defaults to <>
34922func TPUReplicateMetadataPaddingMap(value []string) TPUReplicateMetadataAttr {
34923	return func(m optionalAttr) {
34924		m["padding_map"] = value
34925	}
34926}
34927
34928// TPUReplicateMetadataStepMarkerLocation sets the optional step_marker_location attribute to value.
34929// If not specified, defaults to "STEP_MARK_AT_ENTRY"
34930func TPUReplicateMetadataStepMarkerLocation(value string) TPUReplicateMetadataAttr {
34931	return func(m optionalAttr) {
34932		m["step_marker_location"] = value
34933	}
34934}
34935
34936// TPUReplicateMetadataAllowSoftPlacement sets the optional allow_soft_placement attribute to value.
34937// If not specified, defaults to false
34938func TPUReplicateMetadataAllowSoftPlacement(value bool) TPUReplicateMetadataAttr {
34939	return func(m optionalAttr) {
34940		m["allow_soft_placement"] = value
34941	}
34942}
34943
34944// TPUReplicateMetadataUseSpmdForXlaPartitioning sets the optional use_spmd_for_xla_partitioning attribute to value.
34945// If not specified, defaults to false
34946func TPUReplicateMetadataUseSpmdForXlaPartitioning(value bool) TPUReplicateMetadataAttr {
34947	return func(m optionalAttr) {
34948		m["use_spmd_for_xla_partitioning"] = value
34949	}
34950}
34951
34952// Metadata indicating how the TPU computation should be replicated.
34953//
34954// This operation holds the metadata common to operations of a `tpu.replicate()` computation subgraph.
34955//
34956// Arguments:
34957//	num_replicas: Number of replicas of the computation
34958//
34959// Returns the created operation.
34960func TPUReplicateMetadata(scope *Scope, num_replicas int64, optional ...TPUReplicateMetadataAttr) (o *tf.Operation) {
34961	if scope.Err() != nil {
34962		return
34963	}
34964	attrs := map[string]interface{}{"num_replicas": num_replicas}
34965	for _, a := range optional {
34966		a(attrs)
34967	}
34968	opspec := tf.OpSpec{
34969		Type: "TPUReplicateMetadata",
34970
34971		Attrs: attrs,
34972	}
34973	return scope.AddOperation(opspec)
34974}
34975
34976// Returns the TopK unique values in the array in sorted order.
34977//
34978// The running time is proportional to the product of K and the input
34979// size. Sorting the whole array is more efficient for sufficiently large
34980// values of K. The median-of-medians algorithm is probably faster, but
34981// difficult to implement efficiently in XLA. If there are fewer than K
34982// unique numbers (not NaNs), the results are padded with negative
34983// infinity. NaNs are never returned. Subnormal numbers are flushed to
34984// zero. If an element appears at multiple indices, the highest index is
34985// returned. If a TopK element never appears in the input due to padding
34986// values, the indices are padded with negative one. If a padding value
34987// appears in the input and padding is needed, the highest index of the
34988// padding value will be returned. The semantics are not the same as
34989// kth_order_statistic.
34990func TopKUnique(scope *Scope, input tf.Output, k int64) (topk tf.Output, topk_indices tf.Output) {
34991	if scope.Err() != nil {
34992		return
34993	}
34994	attrs := map[string]interface{}{"k": k}
34995	opspec := tf.OpSpec{
34996		Type: "TopKUnique",
34997		Input: []tf.Input{
34998			input,
34999		},
35000		Attrs: attrs,
35001	}
35002	op := scope.AddOperation(opspec)
35003	return op.Output(0), op.Output(1)
35004}
35005
35006// SizeAttr is an optional argument to Size.
35007type SizeAttr func(optionalAttr)
35008
35009// SizeOutType sets the optional out_type attribute to value.
35010// If not specified, defaults to DT_INT32
35011func SizeOutType(value tf.DataType) SizeAttr {
35012	return func(m optionalAttr) {
35013		m["out_type"] = value
35014	}
35015}
35016
35017// Returns the size of a tensor.
35018//
35019// This operation returns an integer representing the number of elements in
35020// `input`.
35021//
35022// For example:
35023//
35024// ```
35025// # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
35026// size(t) ==> 12
35027// ```
35028func Size(scope *Scope, input tf.Output, optional ...SizeAttr) (output tf.Output) {
35029	if scope.Err() != nil {
35030		return
35031	}
35032	attrs := map[string]interface{}{}
35033	for _, a := range optional {
35034		a(attrs)
35035	}
35036	opspec := tf.OpSpec{
35037		Type: "Size",
35038		Input: []tf.Input{
35039			input,
35040		},
35041		Attrs: attrs,
35042	}
35043	op := scope.AddOperation(opspec)
35044	return op.Output(0)
35045}
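
// A sketch of the example above, also requesting an int64 result through the
// optional out_type attribute:
//
// ```go
// s := op.NewScope()
// t := op.Const(s, [][]int32{{1, 2, 3}, {4, 5, 6}})
// n := op.Size(s, t, op.SizeOutType(tf.Int64))
// // After running the graph: n => 6 (as int64).
// _ = n
// ```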
35046
35047// TPUReplicatedInputAttr is an optional argument to TPUReplicatedInput.
35048type TPUReplicatedInputAttr func(optionalAttr)
35049
35050// TPUReplicatedInputIsMirroredVariable sets the optional is_mirrored_variable attribute to value.
35051// If not specified, defaults to false
35052func TPUReplicatedInputIsMirroredVariable(value bool) TPUReplicatedInputAttr {
35053	return func(m optionalAttr) {
35054		m["is_mirrored_variable"] = value
35055	}
35056}
35057
35058// TPUReplicatedInputIndex sets the optional index attribute to value.
35059// If not specified, defaults to -1
35060func TPUReplicatedInputIndex(value int64) TPUReplicatedInputAttr {
35061	return func(m optionalAttr) {
35062		m["index"] = value
35063	}
35064}
35065
35066// TPUReplicatedInputIsPacked sets the optional is_packed attribute to value.
35067// If not specified, defaults to false
35068func TPUReplicatedInputIsPacked(value bool) TPUReplicatedInputAttr {
35069	return func(m optionalAttr) {
35070		m["is_packed"] = value
35071	}
35072}
35073
35074// Connects N inputs to an N-way replicated TPU computation.
35075//
35076// This operation holds a replicated input to a `tpu.replicate()` computation subgraph.
35077// Each replicated input has the same shape and type as the output.
35078//
35079// For example:
35080// ```
35081// %a = "tf.opA"()
35082// %b = "tf.opB"()
35083// %replicated_input = "tf.TPUReplicatedInput"(%a, %b)
35084// %computation = "tf.Computation"(%replicated_input)
35085// ```
35086// The above computation has a replicated input of two replicas.
35087func TPUReplicatedInput(scope *Scope, inputs []tf.Output, optional ...TPUReplicatedInputAttr) (output tf.Output) {
35088	if scope.Err() != nil {
35089		return
35090	}
35091	attrs := map[string]interface{}{}
35092	for _, a := range optional {
35093		a(attrs)
35094	}
35095	opspec := tf.OpSpec{
35096		Type: "TPUReplicatedInput",
35097		Input: []tf.Input{
35098			tf.OutputList(inputs),
35099		},
35100		Attrs: attrs,
35101	}
35102	op := scope.AddOperation(opspec)
35103	return op.Output(0)
35104}

// Creates a tensor filled with a scalar value.
//
// This operation creates a tensor of shape `dims` and fills it with `value`.
//
// For example:
//
// ```
// # Output tensor has shape [2, 3].
// fill([2, 3], 9) ==> [[9, 9, 9]
//                      [9, 9, 9]]
// ```
//
// `tf.fill` differs from `tf.constant` in a few ways:
//
// *   `tf.fill` only supports scalar contents, whereas `tf.constant` supports
//     Tensor values.
// *   `tf.fill` creates an Op in the computation graph that constructs the actual
//     Tensor value at runtime. This is in contrast to `tf.constant` which embeds
//     the entire Tensor into the graph with a `Const` node.
// *   Because `tf.fill` evaluates at graph runtime, it supports dynamic shapes
//     based on other runtime Tensors, unlike `tf.constant`.
//
// Arguments:
//	dims: 1-D. Represents the shape of the output tensor.
//	value: 0-D (scalar). Value to fill the returned tensor.
//
// @compatibility(numpy)
// Equivalent to np.full
// @end_compatibility
func Fill(scope *Scope, dims tf.Output, value tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Fill",
		Input: []tf.Input{
			dims, value,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
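
// exampleFill is an illustrative sketch, not machine generated: it reproduces
// the doc example above, filling a [2, 3] tensor with the scalar 9. The helper
// name is hypothetical.
func exampleFill(s *Scope) tf.Output {
	dims := Const(s, []int32{2, 3}) // output shape
	nine := Const(s, int32(9))      // scalar fill value
	return Fill(s, dims, nine)
}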

// Converts a dense tensor to a (possibly batched) CSRSparseMatrix.
//
// Arguments:
//	dense_input: A Dense tensor.
//	indices: Indices of nonzero elements.
//
// Returns A (possibly batched) CSRSparseMatrix.
func DenseToCSRSparseMatrix(scope *Scope, dense_input tf.Output, indices tf.Output) (sparse_output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "DenseToCSRSparseMatrix",
		Input: []tf.Input{
			dense_input, indices,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
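
// exampleDenseToCSR is an illustrative sketch, not machine generated: it
// converts a small dense matrix whose nonzero coordinates are listed
// explicitly. In practice the indices would usually come from a Where op;
// here they are hard-coded for clarity, and the helper name is hypothetical.
func exampleDenseToCSR(s *Scope) tf.Output {
	dense := Const(s, [][]float32{{0, 1}, {2, 0}})
	idx := Const(s, [][]int64{{0, 1}, {1, 0}}) // coordinates of the nonzeros
	return DenseToCSRSparseMatrix(s, dense, idx)
}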

// Fills empty rows in the input 2-D `SparseTensor` with a default value.
//
// The input `SparseTensor` is represented via the tuple of inputs
// (`indices`, `values`, `dense_shape`).  The output `SparseTensor` has the
// same `dense_shape` but with indices `output_indices` and values
// `output_values`.
//
// This op inserts a single entry for every row that doesn't have any values.
// The index is created as `[row, 0, ..., 0]` and the inserted value
// is `default_value`.
//
// For example, suppose `sp_input` has shape `[5, 6]` and non-empty values:
//
//     [0, 1]: a
//     [0, 3]: b
//     [2, 0]: c
//     [3, 1]: d
//
// Rows 1 and 4 are empty, so the output will be of shape `[5, 6]` with values:
//
//     [0, 1]: a
//     [0, 3]: b
//     [1, 0]: default_value
//     [2, 0]: c
//     [3, 1]: d
//     [4, 0]: default_value
//
// The output `SparseTensor` will be in row-major order and will have the
// same shape as the input.
//
// This op also returns an indicator vector shaped `[dense_shape[0]]` such that
//
//     empty_row_indicator[i] = True iff row i was an empty row.
//
// And a reverse index map vector shaped `[indices.shape[0]]` that is used during
// backpropagation,
//
//     reverse_index_map[j] = out_j s.t. indices[j, :] == output_indices[out_j, :]
//
// Arguments:
//	indices: 2-D. the indices of the sparse tensor.
//	values: 1-D. the values of the sparse tensor.
//	dense_shape: 1-D. the shape of the sparse tensor.
//	default_value: 0-D. default value to insert into location `[row, 0, ..., 0]`
// for rows missing from the input sparse tensor.
//
// Returns:
//	output_indices: 2-D. the indices of the filled sparse tensor.
//	output_values: 1-D. the values of the filled sparse tensor.
//	empty_row_indicator: 1-D. whether the dense row was missing in the
// input sparse tensor.
//	reverse_index_map: 1-D. a map from the input indices to the output indices.
func SparseFillEmptyRows(scope *Scope, indices tf.Output, values tf.Output, dense_shape tf.Output, default_value tf.Output) (output_indices tf.Output, output_values tf.Output, empty_row_indicator tf.Output, reverse_index_map tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "SparseFillEmptyRows",
		Input: []tf.Input{
			indices, values, dense_shape, default_value,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2), op.Output(3)
}
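
// exampleSparseFillEmptyRows is an illustrative sketch, not machine generated:
// it builds the [5, 6] string SparseTensor from the doc comment above and
// fills its empty rows. The helper name is hypothetical.
func exampleSparseFillEmptyRows(s *Scope) (tf.Output, tf.Output, tf.Output, tf.Output) {
	indices := Const(s, [][]int64{{0, 1}, {0, 3}, {2, 0}, {3, 1}})
	values := Const(s, []string{"a", "b", "c", "d"})
	denseShape := Const(s, []int64{5, 6})
	def := Const(s, "default_value") // inserted at [1, 0] and [4, 0]
	return SparseFillEmptyRows(s, indices, values, denseShape, def)
}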

// Makes the summary of quantiles for the batch.
//
// An op that takes a list of tensors (one tensor per feature) and outputs the
// quantile summaries for each tensor.
//
// Arguments:
//	float_values: float; List of Rank 1 Tensors each containing values for a single feature.
//	example_weights: float; Rank 1 Tensor with weights per instance.
//	epsilon: float; The required maximum approximation error.
//
// Returns float; List of Rank 2 Tensors each containing the quantile summary
// (value, weight, min_rank, max_rank) of a single feature.
func BoostedTreesMakeQuantileSummaries(scope *Scope, float_values []tf.Output, example_weights tf.Output, epsilon tf.Output) (summaries []tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "BoostedTreesMakeQuantileSummaries",
		Input: []tf.Input{
			tf.OutputList(float_values), example_weights, epsilon,
		},
	}
	op := scope.AddOperation(opspec)
	if scope.Err() != nil {
		return
	}
	var idx int
	var err error
	if summaries, idx, err = makeOutputList(op, idx, "summaries"); err != nil {
		scope.UpdateErr("BoostedTreesMakeQuantileSummaries", err)
		return
	}
	return summaries
}

// TakeManySparseFromTensorsMapAttr is an optional argument to TakeManySparseFromTensorsMap.
type TakeManySparseFromTensorsMapAttr func(optionalAttr)

// TakeManySparseFromTensorsMapContainer sets the optional container attribute to value.
//
// value: The container name for the `SparseTensorsMap` read by this op.
// If not specified, defaults to ""
func TakeManySparseFromTensorsMapContainer(value string) TakeManySparseFromTensorsMapAttr {
	return func(m optionalAttr) {
		m["container"] = value
	}
}

// TakeManySparseFromTensorsMapSharedName sets the optional shared_name attribute to value.
//
// value: The shared name for the `SparseTensorsMap` read by this op.
// It should not be blank; rather the `shared_name` or unique Operation name
// of the Op that created the original `SparseTensorsMap` should be used.
// If not specified, defaults to ""
func TakeManySparseFromTensorsMapSharedName(value string) TakeManySparseFromTensorsMapAttr {
	return func(m optionalAttr) {
		m["shared_name"] = value
	}
}

// Read `SparseTensors` from a `SparseTensorsMap` and concatenate them.
//
// The input `sparse_handles` must be an `int64` matrix of shape `[N, 1]` where
// `N` is the minibatch size and the rows correspond to the output handles of
// `AddSparseToTensorsMap` or `AddManySparseToTensorsMap`.  The ranks of the
// original `SparseTensor` objects that went into the given input ops must all
// match.  When the final `SparseTensor` is created, it has rank one
// higher than the ranks of the incoming `SparseTensor` objects
// (they have been concatenated along a new row dimension on the left).
//
// The output `SparseTensor` object's shape values for all dimensions but the
// first are the max across the input `SparseTensor` objects' shape values
// for the corresponding dimensions.  Its first shape value is `N`, the minibatch
// size.
//
// The input `SparseTensor` objects' indices are assumed ordered in
// standard lexicographic order.  If this is not the case, after this
// step run `SparseReorder` to restore index ordering.
//
// For example, if the handles represent an input, which is a `[2, 3]` matrix
// representing two original `SparseTensor` objects:
//
// ```
//     index = [ 0]
//             [10]
//             [20]
//     values = [1, 2, 3]
//     shape = [50]
// ```
//
// and
//
// ```
//     index = [ 2]
//             [10]
//     values = [4, 5]
//     shape = [30]
// ```
//
// then the final `SparseTensor` will be:
//
// ```
//     index = [0  0]
//             [0 10]
//             [0 20]
//             [1  2]
//             [1 10]
//     values = [1, 2, 3, 4, 5]
//     shape = [2 50]
// ```
//
// Arguments:
//	sparse_handles: 1-D, The `N` serialized `SparseTensor` objects.
// Shape: `[N]`.
//	dtype: The `dtype` of the `SparseTensor` objects stored in the
// `SparseTensorsMap`.
//
// Returns:
//	sparse_indices: 2-D.  The `indices` of the minibatch `SparseTensor`.
//	sparse_values: 1-D.  The `values` of the minibatch `SparseTensor`.
//	sparse_shape: 1-D.  The `shape` of the minibatch `SparseTensor`.
func TakeManySparseFromTensorsMap(scope *Scope, sparse_handles tf.Output, dtype tf.DataType, optional ...TakeManySparseFromTensorsMapAttr) (sparse_indices tf.Output, sparse_values tf.Output, sparse_shape tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"dtype": dtype}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "TakeManySparseFromTensorsMap",
		Input: []tf.Input{
			sparse_handles,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}

// Compute the regularized incomplete beta integral \\(I_x(a, b)\\).
//
// The regularized incomplete beta integral is defined as:
//
// \\(I_x(a, b) = \frac{B(x; a, b)}{B(a, b)}\\)
//
// where
//
// \\(B(x; a, b) = \int_0^x t^{a-1} (1 - t)^{b-1} dt\\)
//
// is the incomplete beta function and \\(B(a, b)\\) is the *complete*
// beta function.
func Betainc(scope *Scope, a tf.Output, b tf.Output, x tf.Output) (z tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Betainc",
		Input: []tf.Input{
			a, b, x,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
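
// exampleBetainc is an illustrative sketch, not machine generated: it
// evaluates I_x(a, b) for scalar inputs. With a = b = 0.5 and x = 0.5 the
// result is 0.5 by the symmetry I_{1/2}(a, a) = 1/2. The helper name is
// hypothetical.
func exampleBetainc(s *Scope) tf.Output {
	a := Const(s, float32(0.5))
	b := Const(s, float32(0.5))
	x := Const(s, float32(0.5))
	return Betainc(s, a, b, x)
}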

// Reduces sparse updates into the variable referenced by `resource` using the `max` operation.
//
// This operation computes
//
//     # Scalar indices
//     ref[indices, ...] = max(ref[indices, ...], updates[...])
//
//     # Vector indices (for each i)
//     ref[indices[i], ...] = max(ref[indices[i], ...], updates[i, ...])
//
//     # High rank indices (for each i, ..., j)
//     ref[indices[i, ..., j], ...] = max(ref[indices[i, ..., j], ...], updates[i, ..., j, ...])
//
// Duplicate entries are handled correctly: if multiple `indices` reference
// the same location, their contributions are combined.
//
// Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.
//
// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
// <img style="width:100%" src='https://www.tensorflow.org/images/ScatterAdd.png' alt>
// </div>
//
// Arguments:
//	resource: Should be from a `Variable` node.
//	indices: A tensor of indices into the first dimension of `ref`.
//	updates: A tensor of updated values to reduce into `ref`.
//
// Returns the created operation.
func ResourceScatterMax(scope *Scope, resource tf.Output, indices tf.Output, updates tf.Output) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "ResourceScatterMax",
		Input: []tf.Input{
			resource, indices, updates,
		},
	}
	return scope.AddOperation(opspec)
}
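
// exampleResourceScatterMax is an illustrative sketch, not machine generated:
// it creates a float variable, initializes it, and scatters element-wise
// maxima into rows 0 and 2. Control dependencies that would enforce the
// init-before-scatter ordering at run time are elided for brevity; the helper
// name is hypothetical.
func exampleResourceScatterMax(s *Scope) *tf.Operation {
	v := VarHandleOp(s, tf.Float, tf.MakeShape(4))
	AssignVariableOp(s, v, Const(s, []float32{1, 1, 1, 1}))
	indices := Const(s, []int32{0, 2})
	updates := Const(s, []float32{5, 7}) // rows 0 and 2 become 5 and 7
	return ResourceScatterMax(s, v, indices, updates)
}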

// AddManySparseToTensorsMapAttr is an optional argument to AddManySparseToTensorsMap.
type AddManySparseToTensorsMapAttr func(optionalAttr)

// AddManySparseToTensorsMapContainer sets the optional container attribute to value.
//
// value: The container name for the `SparseTensorsMap` created by this op.
// If not specified, defaults to ""
func AddManySparseToTensorsMapContainer(value string) AddManySparseToTensorsMapAttr {
	return func(m optionalAttr) {
		m["container"] = value
	}
}

// AddManySparseToTensorsMapSharedName sets the optional shared_name attribute to value.
//
// value: The shared name for the `SparseTensorsMap` created by this op.
// If blank, the new Operation's unique name is used.
// If not specified, defaults to ""
func AddManySparseToTensorsMapSharedName(value string) AddManySparseToTensorsMapAttr {
	return func(m optionalAttr) {
		m["shared_name"] = value
	}
}

// Add an `N`-minibatch `SparseTensor` to a `SparseTensorsMap`, return `N` handles.
//
// A `SparseTensor` of rank `R` is represented by three tensors: `sparse_indices`,
// `sparse_values`, and `sparse_shape`, where
//
// ```sparse_indices.shape[1] == sparse_shape.shape[0] == R```
//
// An `N`-minibatch of `SparseTensor` objects is represented as a `SparseTensor`
// having a first `sparse_indices` column taking values between `[0, N)`, where
// the minibatch size `N == sparse_shape[0]`.
//
// The input `SparseTensor` must have rank `R` greater than 1, and the first
// dimension is treated as the minibatch dimension.  Elements of the `SparseTensor`
// must be sorted in increasing order of this first dimension.  The stored
// `SparseTensor` objects pointed to by each row of the output `sparse_handles`
// will have rank `R-1`.
//
// The `SparseTensor` values can then be read out as part of a minibatch by passing
// the given keys as vector elements to `TakeManySparseFromTensorsMap`.  To ensure
// the correct `SparseTensorsMap` is accessed, ensure that the same
// `container` and `shared_name` are passed to that Op.  If no `shared_name`
// is provided here, instead use the *name* of the Operation created by calling
// `AddManySparseToTensorsMap` as the `shared_name` passed to
// `TakeManySparseFromTensorsMap`.  Ensure the Operations are colocated.
//
// Arguments:
//	sparse_indices: 2-D.  The `indices` of the minibatch `SparseTensor`.
// `sparse_indices[:, 0]` must be ordered values in `[0, N)`.
//	sparse_values: 1-D.  The `values` of the minibatch `SparseTensor`.
//	sparse_shape: 1-D.  The `shape` of the minibatch `SparseTensor`.
// The minibatch size `N == sparse_shape[0]`.
//
// Returns 1-D.  The handles of the `SparseTensor` now stored in the
// `SparseTensorsMap`.  Shape: `[N]`.
func AddManySparseToTensorsMap(scope *Scope, sparse_indices tf.Output, sparse_values tf.Output, sparse_shape tf.Output, optional ...AddManySparseToTensorsMapAttr) (sparse_handles tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "AddManySparseToTensorsMap",
		Input: []tf.Input{
			sparse_indices, sparse_values, sparse_shape,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
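
// exampleSparseTensorsMapRoundTrip is an illustrative sketch, not machine
// generated: it stores a [2, 1] string minibatch SparseTensor in a tensors
// map and reads it back, pairing the two ops via a shared_name as the doc
// comments above require. "demo_map" and the helper name are hypothetical.
func exampleSparseTensorsMapRoundTrip(s *Scope) (tf.Output, tf.Output, tf.Output) {
	indices := Const(s, [][]int64{{0, 0}, {1, 0}})
	values := Const(s, []string{"a", "b"})
	shape := Const(s, []int64{2, 1})
	handles := AddManySparseToTensorsMap(s, indices, values, shape,
		AddManySparseToTensorsMapSharedName("demo_map"))
	return TakeManySparseFromTensorsMap(s, handles, tf.String,
		TakeManySparseFromTensorsMapSharedName("demo_map"))
}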

// Writes a histogram summary.
//
// Writes histogram `values` at `step` with `tag` using summary `writer`.
//
// Returns the created operation.
func WriteHistogramSummary(scope *Scope, writer tf.Output, step tf.Output, tag tf.Output, values tf.Output) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "WriteHistogramSummary",
		Input: []tf.Input{
			writer, step, tag, values,
		},
	}
	return scope.AddOperation(opspec)
}

// Computes tan of x element-wise.
//
//   Given an input tensor, this function computes tangent of every
//   element in the tensor. Input range is `(-inf, inf)` and
//   output range is `(-inf, inf)`. If the input lies outside that open
//   interval (i.e. is `-inf` or `inf`), `nan` is returned.
//
//   ```python
//   x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 200, 10000, float("inf")])
//   tf.math.tan(x) ==> [nan 0.45231566 -0.5463025 1.5574077 2.572152 -1.7925274 0.32097113 nan]
//   ```
func Tan(scope *Scope, x tf.Output) (y tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Tan",
		Input: []tf.Input{
			x,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
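
// exampleTan is an illustrative sketch, not machine generated: it applies Tan
// to a small float vector, matching a slice of the Python example above. The
// helper name is hypothetical.
func exampleTan(s *Scope) tf.Output {
	x := Const(s, []float32{-9, -0.5, 1, 1.2})
	return Tan(s, x) // ==> [0.45231566 -0.5463025 1.5574077 2.572152]
}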

// Advance the counter of a counter-based RNG.
//
// The state of the RNG after
// `rng_skip(n)` will be the same as that after `stateful_uniform([n])`
// (or any other distribution). The actual increment added to the
// counter is an unspecified implementation detail.
//
// Arguments:
//	resource: The handle of the resource variable that stores the state of the RNG.
//	algorithm: The RNG algorithm.
//	delta: The amount of advancement.
//
// Returns the created operation.
func RngSkip(scope *Scope, resource tf.Output, algorithm tf.Output, delta tf.Output) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "RngSkip",
		Input: []tf.Input{
			resource, algorithm, delta,
		},
	}
	return scope.AddOperation(opspec)
}

// Computes the maximum along segments of a tensor.
//
// Read
// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)
// for an explanation of segments.
//
// This operator is similar to the unsorted segment sum operator found
// [(here)](../../../api_docs/python/math_ops.md#UnsortedSegmentSum).
// Instead of computing the sum over segments, it computes the maximum such that:
//
// \\(output_i = \max_{j...} data[j...]\\) where max is over tuples `j...` such
// that `segment_ids[j...] == i`.
//
// If the maximum is empty for a given segment ID `i`, it outputs the smallest
// possible value for the specific numeric type,
// `output[i] = numeric_limits<T>::lowest()`.
//
// If the given segment ID `i` is negative, then the corresponding value is
// dropped, and will not be included in the result.
//
// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
// <img style="width:100%" src="https://www.tensorflow.org/images/UnsortedSegmentMax.png" alt>
// </div>
//
// For example:
//
// ``` python
// c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]])
// tf.unsorted_segment_max(c, tf.constant([0, 1, 0]), num_segments=2)
// # ==> [[4, 3, 3, 4],
// #      [5, 6, 7, 8]]
// ```
//
// Arguments:
//
//	segment_ids: A tensor whose shape is a prefix of `data.shape`.
//
// Returns Has same shape as data, except for the first `segment_ids.rank`
// dimensions, which are replaced with a single dimension which has size
// `num_segments`.
func UnsortedSegmentMax(scope *Scope, data tf.Output, segment_ids tf.Output, num_segments tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "UnsortedSegmentMax",
		Input: []tf.Input{
			data, segment_ids, num_segments,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
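
// exampleUnsortedSegmentMax is an illustrative sketch, not machine generated:
// it reproduces the Python example above, taking the per-column max of rows 0
// and 2 (segment 0) and row 1 (segment 1). The helper name is hypothetical.
func exampleUnsortedSegmentMax(s *Scope) tf.Output {
	c := Const(s, [][]int32{{1, 2, 3, 4}, {5, 6, 7, 8}, {4, 3, 2, 1}})
	segmentIDs := Const(s, []int32{0, 1, 0})
	numSegments := Const(s, int32(2))
	return UnsortedSegmentMax(s, c, segmentIDs, numSegments) // ==> [[4 3 3 4] [5 6 7 8]]
}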

// StringUpperAttr is an optional argument to StringUpper.
type StringUpperAttr func(optionalAttr)

// StringUpperEncoding sets the optional encoding attribute to value.
// If not specified, defaults to ""
func StringUpperEncoding(value string) StringUpperAttr {
	return func(m optionalAttr) {
		m["encoding"] = value
	}
}

// Converts all lowercase characters into their respective uppercase replacements.
//
// Example:
//
// >>> tf.strings.upper("CamelCase string and ALL CAPS")
// <tf.Tensor: shape=(), dtype=string, numpy=b'CAMELCASE STRING AND ALL CAPS'>
//
func StringUpper(scope *Scope, input tf.Output, optional ...StringUpperAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "StringUpper",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
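
// exampleStringUpper is an illustrative sketch, not machine generated: it
// upper-cases a scalar string, matching the doc example above. The helper name
// is hypothetical.
func exampleStringUpper(s *Scope) tf.Output {
	return StringUpper(s, Const(s, "CamelCase string and ALL CAPS"))
}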

// SparseReduceMaxAttr is an optional argument to SparseReduceMax.
type SparseReduceMaxAttr func(optionalAttr)

// SparseReduceMaxKeepDims sets the optional keep_dims attribute to value.
//
// value: If true, retain reduced dimensions with length 1.
// If not specified, defaults to false
func SparseReduceMaxKeepDims(value bool) SparseReduceMaxAttr {
	return func(m optionalAttr) {
		m["keep_dims"] = value
	}
}

// Computes the max of elements across dimensions of a SparseTensor.
//
// This Op takes a SparseTensor and is the sparse counterpart to
// `tf.reduce_max()`.  In particular, this Op also returns a dense `Tensor`
// instead of a sparse one.
//
// Reduces `sp_input` along the dimensions given in `reduction_axes`.  Unless
// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
// `reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained
// with length 1.
//
// If `reduction_axes` has no entries, all dimensions are reduced, and a tensor
// with a single element is returned.  Additionally, the axes can be negative,
// which are interpreted according to the indexing rules in Python.
//
// Arguments:
//	input_indices: 2-D.  `N x R` matrix with the indices of non-empty values in a
// SparseTensor, possibly not in canonical ordering.
//	input_values: 1-D.  `N` non-empty values corresponding to `input_indices`.
//	input_shape: 1-D.  Shape of the input SparseTensor.
//	reduction_axes: 1-D.  Length-`K` vector containing the reduction axes.
//
// Returns `R-K`-D.  The reduced Tensor.
func SparseReduceMax(scope *Scope, input_indices tf.Output, input_values tf.Output, input_shape tf.Output, reduction_axes tf.Output, optional ...SparseReduceMaxAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "SparseReduceMax",
		Input: []tf.Input{
			input_indices, input_values, input_shape, reduction_axes,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
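
// exampleSparseReduceMax is an illustrative sketch, not machine generated: it
// reduces a 2x2 SparseTensor with stored values at [0, 0] and [1, 1] along
// axis 1, keeping the reduced dimension, and also shows the functional-options
// pattern this package uses for optional attributes. The helper name is
// hypothetical.
func exampleSparseReduceMax(s *Scope) tf.Output {
	indices := Const(s, [][]int64{{0, 0}, {1, 1}})
	values := Const(s, []float32{3, 5})
	shape := Const(s, []int64{2, 2})
	axes := Const(s, []int32{1})
	return SparseReduceMax(s, indices, values, shape, axes, SparseReduceMaxKeepDims(true))
}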

// Generates sparse cross from a list of sparse and dense tensors.
//
// The op takes two lists, one of 2D `SparseTensor` and one of 2D `Tensor`, each
// representing features of one feature column. It outputs a 2D `SparseTensor` with
// the batchwise crosses of these features.
//
// For example, if the inputs are
//
//     inputs[0]: SparseTensor with shape = [2, 2]
//     [0, 0]: "a"
//     [1, 0]: "b"
//     [1, 1]: "c"
//
//     inputs[1]: SparseTensor with shape = [2, 1]
//     [0, 0]: "d"
//     [1, 0]: "e"
//
//     inputs[2]: Tensor [["f"], ["g"]]
//
// then the output will be
//
//     shape = [2, 2]
//     [0, 0]: "a_X_d_X_f"
//     [1, 0]: "b_X_e_X_g"
//     [1, 1]: "c_X_e_X_g"
//
// if hashed_output=true then the output will be
//
//     shape = [2, 2]
//     [0, 0]: FingerprintCat64(
//                 Fingerprint64("f"), FingerprintCat64(
//                     Fingerprint64("d"), Fingerprint64("a")))
//     [1, 0]: FingerprintCat64(
//                 Fingerprint64("g"), FingerprintCat64(
//                     Fingerprint64("e"), Fingerprint64("b")))
//     [1, 1]: FingerprintCat64(
//                 Fingerprint64("g"), FingerprintCat64(
//                     Fingerprint64("e"), Fingerprint64("c")))
//
// Arguments:
//	indices: 2-D.  Indices of each input `SparseTensor`.
//	values: 1-D.   values of each `SparseTensor`.
//	shapes: 1-D.   Shapes of each `SparseTensor`.
//	dense_inputs: 2-D.    Columns represented by dense `Tensor`.
//	num_buckets: It is used if hashed_output is true.
// output = hashed_value%num_buckets if num_buckets > 0 else hashed_value.
//	strong_hash: boolean, if true, siphash with salt will be used instead of farmhash.
//	salt: Specify the salt that will be used by the siphash function.
//
// Returns:
//	output_indices: 2-D.  Indices of the concatenated `SparseTensor`.
//	output_values: 1-D.  Non-empty values of the concatenated or hashed
// `SparseTensor`.
//	output_shape: 1-D.  Shape of the concatenated `SparseTensor`.
func SparseCrossHashed(scope *Scope, indices []tf.Output, values []tf.Output, shapes []tf.Output, dense_inputs []tf.Output, num_buckets tf.Output, strong_hash tf.Output, salt tf.Output) (output_indices tf.Output, output_values tf.Output, output_shape tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "SparseCrossHashed",
		Input: []tf.Input{
			tf.OutputList(indices), tf.OutputList(values), tf.OutputList(shapes), tf.OutputList(dense_inputs), num_buckets, strong_hash, salt,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}

// Conv3DBackpropInputAttr is an optional argument to Conv3DBackpropInput.
type Conv3DBackpropInputAttr func(optionalAttr)

// Conv3DBackpropInputDilations sets the optional dilations attribute to value.
// If not specified, defaults to <i:1 i:1 i:1 i:1 i:1 >
func Conv3DBackpropInputDilations(value []int64) Conv3DBackpropInputAttr {
	return func(m optionalAttr) {
		m["dilations"] = value
	}
}

// Computes the gradients of 3-D convolution with respect to the input.
//
// DEPRECATED at GraphDef version 10: Use Conv3DBackpropInputV2
//
// Arguments:
//	input: Shape `[batch, depth, rows, cols, in_channels]`.
//	filter: Shape `[depth, rows, cols, in_channels, out_channels]`.
// `in_channels` must match between `input` and `filter`.
//	out_backprop: Backprop signal of shape `[batch, out_depth, out_rows, out_cols,
// out_channels]`.
//	strides: 1-D tensor of length 5. The stride of the sliding window for each
// dimension of `input`. Must have `strides[0] = strides[4] = 1`.
//	padding: The type of padding algorithm to use.
func Conv3DBackpropInput(scope *Scope, input tf.Output, filter tf.Output, out_backprop tf.Output, strides []int64, padding string, optional ...Conv3DBackpropInputAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"strides": strides, "padding": padding}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "Conv3DBackpropInput",
		Input: []tf.Input{
			input, filter, out_backprop,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// QuantizedInstanceNormAttr is an optional argument to QuantizedInstanceNorm.
type QuantizedInstanceNormAttr func(optionalAttr)

// QuantizedInstanceNormOutputRangeGiven sets the optional output_range_given attribute to value.
//
// value: If True, `given_y_min` and `given_y_max` are used as the output
// range. Otherwise, the implementation computes the output range.
// If not specified, defaults to false
func QuantizedInstanceNormOutputRangeGiven(value bool) QuantizedInstanceNormAttr {
	return func(m optionalAttr) {
		m["output_range_given"] = value
	}
}

// QuantizedInstanceNormGivenYMin sets the optional given_y_min attribute to value.
//
// value: Output in `y_min` if `output_range_given` is True.
// If not specified, defaults to 0
func QuantizedInstanceNormGivenYMin(value float32) QuantizedInstanceNormAttr {
	return func(m optionalAttr) {
		m["given_y_min"] = value
	}
}

// QuantizedInstanceNormGivenYMax sets the optional given_y_max attribute to value.
//
// value: Output in `y_max` if `output_range_given` is True.
// If not specified, defaults to 0
func QuantizedInstanceNormGivenYMax(value float32) QuantizedInstanceNormAttr {
	return func(m optionalAttr) {
		m["given_y_max"] = value
	}
}

// QuantizedInstanceNormVarianceEpsilon sets the optional variance_epsilon attribute to value.
//
// value: A small float number to avoid dividing by 0.
// If not specified, defaults to 1e-05
func QuantizedInstanceNormVarianceEpsilon(value float32) QuantizedInstanceNormAttr {
	return func(m optionalAttr) {
		m["variance_epsilon"] = value
	}
}

// QuantizedInstanceNormMinSeparation sets the optional min_separation attribute to value.
//
// value: Minimum value of `y_max - y_min`
// If not specified, defaults to 0.001
func QuantizedInstanceNormMinSeparation(value float32) QuantizedInstanceNormAttr {
	return func(m optionalAttr) {
		m["min_separation"] = value
	}
}

// Quantized Instance normalization.
//
// Arguments:
//	x: A 4D input Tensor.
//	x_min: The value represented by the lowest quantized input.
//	x_max: The value represented by the highest quantized input.
//
// Returns:
//	y: A 4D Tensor.
//	y_min: The value represented by the lowest quantized output.
//	y_max: The value represented by the highest quantized output.
func QuantizedInstanceNorm(scope *Scope, x tf.Output, x_min tf.Output, x_max tf.Output, optional ...QuantizedInstanceNormAttr) (y tf.Output, y_min tf.Output, y_max tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "QuantizedInstanceNorm",
		Input: []tf.Input{
			x, x_min, x_max,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}

// FusedBatchNormV3Attr is an optional argument to FusedBatchNormV3.
type FusedBatchNormV3Attr func(optionalAttr)

// FusedBatchNormV3Epsilon sets the optional epsilon attribute to value.
//
// value: A small float number added to the variance of x.
// If not specified, defaults to 0.0001
func FusedBatchNormV3Epsilon(value float32) FusedBatchNormV3Attr {
	return func(m optionalAttr) {
		m["epsilon"] = value
	}
}

// FusedBatchNormV3ExponentialAvgFactor sets the optional exponential_avg_factor attribute to value.
// If not specified, defaults to 1
func FusedBatchNormV3ExponentialAvgFactor(value float32) FusedBatchNormV3Attr {
	return func(m optionalAttr) {
		m["exponential_avg_factor"] = value
	}
}

// FusedBatchNormV3DataFormat sets the optional data_format attribute to value.
//
// value: The data format for x and y. Either "NHWC" (default) or "NCHW".
// If not specified, defaults to "NHWC"
func FusedBatchNormV3DataFormat(value string) FusedBatchNormV3Attr {
	return func(m optionalAttr) {
		m["data_format"] = value
	}
}

// FusedBatchNormV3IsTraining sets the optional is_training attribute to value.
//
// value: A bool value to indicate the operation is for training (default)
// or inference.
// If not specified, defaults to true
func FusedBatchNormV3IsTraining(value bool) FusedBatchNormV3Attr {
	return func(m optionalAttr) {
		m["is_training"] = value
	}
}

// Batch normalization.
//
// Note that the size of 4D Tensors is defined by either "NHWC" or "NCHW".
// The size of 1D Tensors matches the dimension C of the 4D Tensors.
//
// Arguments:
//	x: A 4D Tensor for input data.
//	scale: A 1D Tensor for scaling factor, to scale the normalized x.
//	offset: A 1D Tensor for offset, to shift to the normalized x.
//	mean: A 1D Tensor for population mean. Used for inference only;
// must be empty for training.
//	variance: A 1D Tensor for population variance. Used for inference only;
// must be empty for training.
//
// Returns:
//	y: A 4D Tensor for output data.
//	batch_mean: A 1D Tensor for the computed batch mean, to be used by TensorFlow
// to compute the running mean.
//	batch_variance: A 1D Tensor for the computed batch variance, to be used by
// TensorFlow to compute the running variance.
//	reserve_space_1: A 1D Tensor for the computed batch mean, to be reused
// in the gradient computation.
//	reserve_space_2: A 1D Tensor for the computed batch variance (inverted variance
// in the cuDNN case), to be reused in the gradient computation.
//	reserve_space_3: A 1D Tensor for some intermediate results, to be reused in the gradient
// computation for better efficiency.
func FusedBatchNormV3(scope *Scope, x tf.Output, scale tf.Output, offset tf.Output, mean tf.Output, variance tf.Output, optional ...FusedBatchNormV3Attr) (y tf.Output, batch_mean tf.Output, batch_variance tf.Output, reserve_space_1 tf.Output, reserve_space_2 tf.Output, reserve_space_3 tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "FusedBatchNormV3",
		Input: []tf.Input{
			x, scale, offset, mean, variance,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2), op.Output(3), op.Output(4), op.Output(5)
}
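
// exampleFusedBatchNormV3 is an illustrative sketch, not machine generated: it
// normalizes a tiny NHWC tensor in training mode, where `mean` and `variance`
// must be empty and batch statistics are returned alongside `y`. The helper
// name is hypothetical.
func exampleFusedBatchNormV3(s *Scope) (tf.Output, tf.Output, tf.Output) {
	x := Const(s, [][][][]float32{{{{1}, {2}}}}) // shape [1, 1, 2, 1], NHWC
	scale := Const(s, []float32{1})
	offset := Const(s, []float32{0})
	empty := Const(s, []float32{}) // population stats must be empty for training
	y, batchMean, batchVariance, _, _, _ := FusedBatchNormV3(s, x, scale, offset, empty, empty)
	return y, batchMean, batchVariance
}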

// Computes reciprocal of square root of x element-wise.
//
// I.e., \\(y = 1 / \sqrt{x}\\).
func Rsqrt(scope *Scope, x tf.Output) (y tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Rsqrt",
		Input: []tf.Input{
			x,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// The gradient operator for the SparseSlice op.
//
// This op takes in the upstream gradient w.r.t. non-empty values of
// the sliced `SparseTensor`, and outputs the gradients w.r.t.
// the non-empty values of input `SparseTensor`.
//
// Arguments:
//	backprop_val_grad: 1-D. The gradient with respect to
// the non-empty values of the sliced `SparseTensor`.
//	input_indices: 2-D.  The `indices` of the input `SparseTensor`.
//	input_start: 1-D. Tensor representing the start of the slice.
//	output_indices: 2-D.  The `indices` of the sliced `SparseTensor`.
//
// Returns 1-D. The gradient with respect to the non-empty values of input `SparseTensor`.
func SparseSliceGrad(scope *Scope, backprop_val_grad tf.Output, input_indices tf.Output, input_start tf.Output, output_indices tf.Output) (val_grad tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "SparseSliceGrad",
		Input: []tf.Input{
			backprop_val_grad, input_indices, input_start, output_indices,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Generates sparse cross from a list of sparse and dense tensors.
//
// The op takes two lists, one of 2D `SparseTensor` and one of 2D `Tensor`, each
// representing features of one feature column. It outputs a 2D `SparseTensor` with
// the batchwise crosses of these features.
//
// For example, if the inputs are
//
//     inputs[0]: SparseTensor with shape = [2, 2]
//     [0, 0]: "a"
//     [1, 0]: "b"
//     [1, 1]: "c"
//
//     inputs[1]: SparseTensor with shape = [2, 1]
//     [0, 0]: "d"
//     [1, 0]: "e"
//
//     inputs[2]: Tensor [["f"], ["g"]]
//
// then the output will be
//
//     shape = [2, 2]
//     [0, 0]: "a_X_d_X_f"
//     [1, 0]: "b_X_e_X_g"
//     [1, 1]: "c_X_e_X_g"
//
// if hashed_output=true then the output will be
//
//     shape = [2, 2]
//     [0, 0]: FingerprintCat64(
//                 Fingerprint64("f"), FingerprintCat64(
//                     Fingerprint64("d"), Fingerprint64("a")))
//     [1, 0]: FingerprintCat64(
//                 Fingerprint64("g"), FingerprintCat64(
//                     Fingerprint64("e"), Fingerprint64("b")))
//     [1, 1]: FingerprintCat64(
//                 Fingerprint64("g"), FingerprintCat64(
//                     Fingerprint64("e"), Fingerprint64("c")))
//
// Arguments:
//	indices: 2-D.  Indices of each input `SparseTensor`.
//	values: 1-D.   values of each `SparseTensor`.
//	shapes: 1-D.   Shapes of each `SparseTensor`.
//	dense_inputs: 2-D.    Columns represented by dense `Tensor`.
//	sep: string used when joining a list of string inputs, can be used as separator later.
//
// Returns:
//	output_indices: 2-D.  Indices of the concatenated `SparseTensor`.
//	output_values: 1-D.  Non-empty values of the concatenated or hashed
// `SparseTensor`.
//	output_shape: 1-D.  Shape of the concatenated `SparseTensor`.
func SparseCrossV2(scope *Scope, indices []tf.Output, values []tf.Output, shapes []tf.Output, dense_inputs []tf.Output, sep tf.Output) (output_indices tf.Output, output_values tf.Output, output_shape tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "SparseCrossV2",
		Input: []tf.Input{
			tf.OutputList(indices), tf.OutputList(values), tf.OutputList(shapes), tf.OutputList(dense_inputs), sep,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}

// Generates sparse cross from a list of sparse and dense tensors.
//
// The op takes two lists, one of 2D `SparseTensor` and one of 2D `Tensor`, each
// representing features of one feature column. It outputs a 2D `SparseTensor` with
// the batchwise crosses of these features.
//
// For example, if the inputs are
//
//     inputs[0]: SparseTensor with shape = [2, 2]
//     [0, 0]: "a"
//     [1, 0]: "b"
//     [1, 1]: "c"
//
//     inputs[1]: SparseTensor with shape = [2, 1]
//     [0, 0]: "d"
//     [1, 0]: "e"
//
//     inputs[2]: Tensor [["f"], ["g"]]
//
// then the output will be
//
//     shape = [2, 2]
//     [0, 0]: "a_X_d_X_f"
//     [1, 0]: "b_X_e_X_g"
//     [1, 1]: "c_X_e_X_g"
//
// if hashed_output=true then the output will be
//
//     shape = [2, 2]
//     [0, 0]: FingerprintCat64(
//                 Fingerprint64("f"), FingerprintCat64(
//                     Fingerprint64("d"), Fingerprint64("a")))
//     [1, 0]: FingerprintCat64(
//                 Fingerprint64("g"), FingerprintCat64(
//                     Fingerprint64("e"), Fingerprint64("b")))
//     [1, 1]: FingerprintCat64(
//                 Fingerprint64("g"), FingerprintCat64(
//                     Fingerprint64("e"), Fingerprint64("c")))
//
// Arguments:
//	indices: 2-D.  Indices of each input `SparseTensor`.
//	values: 1-D.   values of each `SparseTensor`.
//	shapes: 1-D.   Shapes of each `SparseTensor`.
//	dense_inputs: 2-D.    Columns represented by dense `Tensor`.
//	hashed_output: If true, returns the hash of the cross instead of the string.
// This avoids string manipulations.
//	num_buckets: It is used if hashed_output is true.
// output = hashed_value%num_buckets if num_buckets > 0 else hashed_value.
//	hash_key: Specify the hash_key that will be used by the `FingerprintCat64`
// function to combine the crosses fingerprints.
//
// Returns:
//	output_indices: 2-D.  Indices of the concatenated `SparseTensor`.
//	output_values: 1-D.  Non-empty values of the concatenated or hashed
// `SparseTensor`.
//	output_shape: 1-D.  Shape of the concatenated `SparseTensor`.
func SparseCross(scope *Scope, indices []tf.Output, values []tf.Output, shapes []tf.Output, dense_inputs []tf.Output, hashed_output bool, num_buckets int64, hash_key int64, out_type tf.DataType, internal_type tf.DataType) (output_indices tf.Output, output_values tf.Output, output_shape tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"hashed_output": hashed_output, "num_buckets": num_buckets, "hash_key": hash_key, "out_type": out_type, "internal_type": internal_type}
	opspec := tf.OpSpec{
		Type: "SparseCross",
		Input: []tf.Input{
			tf.OutputList(indices), tf.OutputList(values), tf.OutputList(shapes), tf.OutputList(dense_inputs),
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}

// Writes a scalar summary.
//
// Writes scalar `value` at `step` with `tag` using summary `writer`.
//
// Returns the created operation.
func WriteScalarSummary(scope *Scope, writer tf.Output, step tf.Output, tag tf.Output, value tf.Output) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "WriteScalarSummary",
		Input: []tf.Input{
			writer, step, tag, value,
		},
	}
	return scope.AddOperation(opspec)
}

// RetrieveTPUEmbeddingProximalAdagradParametersAttr is an optional argument to RetrieveTPUEmbeddingProximalAdagradParameters.
type RetrieveTPUEmbeddingProximalAdagradParametersAttr func(optionalAttr)

// RetrieveTPUEmbeddingProximalAdagradParametersTableId sets the optional table_id attribute to value.
// If not specified, defaults to -1
func RetrieveTPUEmbeddingProximalAdagradParametersTableId(value int64) RetrieveTPUEmbeddingProximalAdagradParametersAttr {
	return func(m optionalAttr) {
		m["table_id"] = value
	}
}

// RetrieveTPUEmbeddingProximalAdagradParametersTableName sets the optional table_name attribute to value.
// If not specified, defaults to ""
func RetrieveTPUEmbeddingProximalAdagradParametersTableName(value string) RetrieveTPUEmbeddingProximalAdagradParametersAttr {
	return func(m optionalAttr) {
		m["table_name"] = value
	}
}

// RetrieveTPUEmbeddingProximalAdagradParametersConfig sets the optional config attribute to value.
// If not specified, defaults to ""
func RetrieveTPUEmbeddingProximalAdagradParametersConfig(value string) RetrieveTPUEmbeddingProximalAdagradParametersAttr {
	return func(m optionalAttr) {
		m["config"] = value
	}
}

// Retrieve proximal Adagrad embedding parameters.
//
// An op that retrieves optimization parameters from embedding to host
// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
// the correct embedding table configuration. For example, this op is
// used to retrieve updated parameters before saving a checkpoint.
//
// Returns:
//	parameters: Parameter parameters updated by the proximal Adagrad optimization algorithm.
//	accumulators: Parameter accumulators updated by the proximal Adagrad optimization algorithm.
func RetrieveTPUEmbeddingProximalAdagradParameters(scope *Scope, num_shards int64, shard_id int64, optional ...RetrieveTPUEmbeddingProximalAdagradParametersAttr) (parameters tf.Output, accumulators tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "RetrieveTPUEmbeddingProximalAdagradParameters",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1)
}

// ReduceJoinAttr is an optional argument to ReduceJoin.
type ReduceJoinAttr func(optionalAttr)

// ReduceJoinKeepDims sets the optional keep_dims attribute to value.
//
// value: If `True`, retain reduced dimensions with length `1`.
// If not specified, defaults to false
func ReduceJoinKeepDims(value bool) ReduceJoinAttr {
	return func(m optionalAttr) {
		m["keep_dims"] = value
	}
}

// ReduceJoinSeparator sets the optional separator attribute to value.
//
// value: The separator to use when joining.
// If not specified, defaults to ""
func ReduceJoinSeparator(value string) ReduceJoinAttr {
	return func(m optionalAttr) {
		m["separator"] = value
	}
}

// Joins a string Tensor across the given dimensions.
//
// Computes the string join across dimensions in the given string Tensor of shape
// `[\\(d_0, d_1, ..., d_{n-1}\\)]`.  Returns a new Tensor created by joining the input
// strings with the given separator (default: empty string).  Negative indices are
// counted backwards from the end, with `-1` being equivalent to `n - 1`.  If
// indices are not specified, joins across all dimensions beginning from `n - 1`
// through `0`.
//
// For example:
//
// ```python
// # tensor `a` is [["a", "b"], ["c", "d"]]
// tf.reduce_join(a, 0) ==> ["ac", "bd"]
// tf.reduce_join(a, 1) ==> ["ab", "cd"]
// tf.reduce_join(a, -2) = tf.reduce_join(a, 0) ==> ["ac", "bd"]
// tf.reduce_join(a, -1) = tf.reduce_join(a, 1) ==> ["ab", "cd"]
// tf.reduce_join(a, 0, keep_dims=True) ==> [["ac", "bd"]]
// tf.reduce_join(a, 1, keep_dims=True) ==> [["ab"], ["cd"]]
// tf.reduce_join(a, 0, separator=".") ==> ["a.c", "b.d"]
// tf.reduce_join(a, [0, 1]) ==> "acbd"
// tf.reduce_join(a, [1, 0]) ==> "abcd"
// tf.reduce_join(a, []) ==> [["a", "b"], ["c", "d"]]
// tf.reduce_join(a) = tf.reduce_join(a, [1, 0]) ==> "abcd"
// ```
//
// Arguments:
//	inputs: The input to be joined.  All reduced indices must have non-zero size.
//	reduction_indices: The dimensions to reduce over.  Dimensions are reduced in the
// order specified.  Omitting `reduction_indices` is equivalent to passing
// `[n-1, n-2, ..., 0]`.  Negative indices from `-n` to `-1` are supported.
//
// Returns Has shape equal to that of the input with reduced dimensions removed or
// set to `1` depending on `keep_dims`.
func ReduceJoin(scope *Scope, inputs tf.Output, reduction_indices tf.Output, optional ...ReduceJoinAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ReduceJoin",
		Input: []tf.Input{
			inputs, reduction_indices,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
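
// exampleReduceJoin is an illustrative sketch, not machine generated: it joins
// the rows of the doc example's 2x2 string tensor along dimension 0 with a
// "." separator, yielding ["a.c", "b.d"]. The helper name is hypothetical.
func exampleReduceJoin(s *Scope) tf.Output {
	a := Const(s, [][]string{{"a", "b"}, {"c", "d"}})
	return ReduceJoin(s, a, Const(s, int32(0)), ReduceJoinSeparator("."))
}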

// Inverse 2D fast Fourier transform.
//
// Computes the inverse 2-dimensional discrete Fourier transform over the
// inner-most 2 dimensions of `input`.
//
// Arguments:
//	input: A complex tensor.
//
// Returns A complex tensor of the same shape as `input`. The inner-most 2
//   dimensions of `input` are replaced with their inverse 2D Fourier transform.
//
// @compatibility(numpy)
// Equivalent to np.fft.ifft2
// @end_compatibility
func IFFT2D(scope *Scope, input tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "IFFT2D",
		Input: []tf.Input{
			input,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
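
// exampleIFFT2D is an illustrative sketch, not machine generated: it applies
// the inverse 2D FFT to a 2x2 complex64 tensor; a lone DC coefficient of 1
// inverts to a constant 0.25 field (the 1/N normalization with N = 4). The
// helper name is hypothetical.
func exampleIFFT2D(s *Scope) tf.Output {
	x := Const(s, [][]complex64{{1, 0}, {0, 0}})
	return IFFT2D(s, x)
}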

// Concatenates a list of `SparseTensor` along the specified dimension.
//
// Concatenation is with respect to the dense versions of these sparse tensors.
// It is assumed that each input is a `SparseTensor` whose elements are ordered
// along increasing dimension number.
//
// All inputs' shapes must match, except for the concat dimension.  The
// `indices`, `values`, and `shapes` lists must have the same length.
//
// The output shape is identical to the inputs', except along the concat
// dimension, where it is the sum of the inputs' sizes along that dimension.
//
// The output elements will be resorted to preserve the sort order along
// increasing dimension number.
//
// This op runs in `O(M log M)` time, where `M` is the total number of non-empty
// values across all inputs. This is due to the need for an internal sort in
// order to concatenate efficiently across an arbitrary dimension.
//
// For example, if `concat_dim = 1` and the inputs are
//
//     sp_inputs[0]: shape = [2, 3]
//     [0, 2]: "a"
//     [1, 0]: "b"
//     [1, 1]: "c"
//
//     sp_inputs[1]: shape = [2, 4]
//     [0, 1]: "d"
//     [0, 2]: "e"
//
// then the output will be
//
//     shape = [2, 7]
//     [0, 2]: "a"
//     [0, 4]: "d"
//     [0, 5]: "e"
//     [1, 0]: "b"
//     [1, 1]: "c"
//
// Graphically this is equivalent to doing
//
//     [    a] concat [  d e  ] = [    a   d e  ]
//     [b c  ]        [       ]   [b c          ]
//
// Arguments:
//	indices: 2-D.  Indices of each input `SparseTensor`.
//	values: 1-D.  Non-empty values of each `SparseTensor`.
//	shapes: 1-D.  Shapes of each `SparseTensor`.
//	concat_dim: Dimension to concatenate along. Must be in range [-rank, rank),
// where rank is the number of dimensions in each input `SparseTensor`.
//
// Returns:
//	output_indices: 2-D.  Indices of the concatenated `SparseTensor`.
//	output_values: 1-D.  Non-empty values of the concatenated `SparseTensor`.
//	output_shape: 1-D.  Shape of the concatenated `SparseTensor`.
func SparseConcat(scope *Scope, indices []tf.Output, values []tf.Output, shapes []tf.Output, concat_dim int64) (output_indices tf.Output, output_values tf.Output, output_shape tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"concat_dim": concat_dim}
	opspec := tf.OpSpec{
		Type: "SparseConcat",
		Input: []tf.Input{
			tf.OutputList(indices), tf.OutputList(values), tf.OutputList(shapes),
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}
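
// exampleSparseConcat is an illustrative sketch, not machine generated: it
// rebuilds the doc example above, concatenating a [2, 3] and a [2, 4] string
// SparseTensor along dimension 1 into a [2, 7] result. The helper name is
// hypothetical.
func exampleSparseConcat(s *Scope) (tf.Output, tf.Output, tf.Output) {
	ia := Const(s, [][]int64{{0, 2}, {1, 0}, {1, 1}})
	va := Const(s, []string{"a", "b", "c"})
	sa := Const(s, []int64{2, 3})
	ib := Const(s, [][]int64{{0, 1}, {0, 2}})
	vb := Const(s, []string{"d", "e"})
	sb := Const(s, []int64{2, 4})
	return SparseConcat(s, []tf.Output{ia, ib}, []tf.Output{va, vb}, []tf.Output{sa, sb}, 1)
}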

// DestroyResourceOpAttr is an optional argument to DestroyResourceOp.
type DestroyResourceOpAttr func(optionalAttr)

// DestroyResourceOpIgnoreLookupError sets the optional ignore_lookup_error attribute to value.
//
// value: whether to ignore the error when the resource
// doesn't exist.
// If not specified, defaults to true
func DestroyResourceOpIgnoreLookupError(value bool) DestroyResourceOpAttr {
	return func(m optionalAttr) {
		m["ignore_lookup_error"] = value
	}
}

// Deletes the resource specified by the handle.
//
// All subsequent operations using the resource will result in a NotFound
// error status.
//
// Arguments:
//	resource: handle to the resource to delete.
//
// Returns the created operation.
func DestroyResourceOp(scope *Scope, resource tf.Output, optional ...DestroyResourceOpAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "DestroyResourceOp",
		Input: []tf.Input{
			resource,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}

// HistogramFixedWidthAttr is an optional argument to HistogramFixedWidth.
type HistogramFixedWidthAttr func(optionalAttr)

// HistogramFixedWidthDtype sets the optional dtype attribute to value.
// If not specified, defaults to DT_INT32
func HistogramFixedWidthDtype(value tf.DataType) HistogramFixedWidthAttr {
	return func(m optionalAttr) {
		m["dtype"] = value
	}
}

// Return histogram of values.
//
// Given the tensor `values`, this operation returns a rank 1 histogram counting
// the number of entries in `values` that fall into every bin.  The bins are
// equal width and determined by the arguments `value_range` and `nbins`.
//
// ```python
// # Bins will be:  (-inf, 1), [1, 2), [2, 3), [3, 4), [4, inf)
// nbins = 5
// value_range = [0.0, 5.0]
// new_values = [-1.0, 0.0, 1.5, 2.0, 5.0, 15]
//
// with tf.get_default_session() as sess:
//   hist = tf.histogram_fixed_width(new_values, value_range, nbins=5)
//   variables.global_variables_initializer().run()
//   sess.run(hist) => [2, 1, 1, 0, 2]
// ```
//
// Arguments:
//	values: Numeric `Tensor`.
//	value_range: Shape [2] `Tensor` of same `dtype` as `values`.
// values <= value_range[0] will be mapped to hist[0],
// values >= value_range[1] will be mapped to hist[-1].
//	nbins: Scalar `int32 Tensor`.  Number of histogram bins.
//
// Returns A 1-D `Tensor` holding histogram of values.
func HistogramFixedWidth(scope *Scope, values tf.Output, value_range tf.Output, nbins tf.Output, optional ...HistogramFixedWidthAttr) (out tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "HistogramFixedWidth",
		Input: []tf.Input{
			values, value_range, nbins,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
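
// exampleHistogramFixedWidth is an illustrative sketch, not machine generated:
// it reproduces the Python example above, bucketing six values into five
// equal-width bins over [0.0, 5.0) to yield [2, 1, 1, 0, 2]. The helper name
// is hypothetical.
func exampleHistogramFixedWidth(s *Scope) tf.Output {
	values := Const(s, []float32{-1.0, 0.0, 1.5, 2.0, 5.0, 15})
	valueRange := Const(s, []float32{0.0, 5.0})
	return HistogramFixedWidth(s, values, valueRange, Const(s, int32(5)))
}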
36551
36552// Bitcasts a tensor from one type to another without copying data.
36553//
36554// Given a tensor `input`, this operation returns a tensor that has the same buffer
36555// data as `input` with datatype `type`.
36556//
36557// If the input datatype `T` is larger than the output datatype `type` then the
36558// shape changes from [...] to [..., sizeof(`T`)/sizeof(`type`)].
36559//
36560// If `T` is smaller than `type`, the operator requires that the rightmost
36561// dimension be equal to sizeof(`type`)/sizeof(`T`). The shape then goes from
36562// [..., sizeof(`type`)/sizeof(`T`)] to [...].
36563//
36564// tf.bitcast() and tf.cast() work differently when real dtype is casted as a complex dtype
36565// (e.g. tf.complex64 or tf.complex128) as tf.cast() make imaginary part 0 while tf.bitcast()
36566// gives module error.
36567// For example,
36568//
36569// Example 1:
36570//
36571// >>> a = [1., 2., 3.]
36572// >>> equality_bitcast = tf.bitcast(a, tf.complex128)
36573// Traceback (most recent call last):
36574// ...
36575// InvalidArgumentError: Cannot bitcast from 1 to 18 [Op:Bitcast]
36576// >>> equality_cast = tf.cast(a, tf.complex128)
36577// >>> print(equality_cast)
36578// tf.Tensor([1.+0.j 2.+0.j 3.+0.j], shape=(3,), dtype=complex128)
36579//
36580// Example 2:
36581//
36582// >>> tf.bitcast(tf.constant(0xffffffff, dtype=tf.uint32), tf.uint8)
36583// <tf.Tensor: shape=(4,), dtype=uint8, numpy=array([255, 255, 255, 255], dtype=uint8)>
36584//
36585// Example 3:
36586//
36587// >>> x = [1., 2., 3.]
36588// >>> y = [0., 2., 3.]
36589// >>> equality = tf.equal(x, y)
36590// >>> equality_cast = tf.cast(equality, tf.float32)
36591// >>> equality_bitcast = tf.bitcast(equality_cast, tf.uint8)
36592// >>> print(equality)
36593// tf.Tensor([False True True], shape=(3,), dtype=bool)
36594// >>> print(equality_cast)
36595// tf.Tensor([0. 1. 1.], shape=(3,), dtype=float32)
36596// >>> print(equality_bitcast)
36597// tf.Tensor(
36598//     [[  0   0   0   0]
36599//      [  0   0 128  63]
36600//      [  0   0 128  63]], shape=(3, 4), dtype=uint8)
36601//
36602// *NOTE*: Bitcast is implemented as a low-level cast, so machines with different
36603// endian orderings will give different results.
36604func Bitcast(scope *Scope, input tf.Output, type_ tf.DataType) (output tf.Output) {
36605	if scope.Err() != nil {
36606		return
36607	}
36608	attrs := map[string]interface{}{"type": type_}
36609	opspec := tf.OpSpec{
36610		Type: "Bitcast",
36611		Input: []tf.Input{
36612			input,
36613		},
36614		Attrs: attrs,
36615	}
36616	op := scope.AddOperation(opspec)
36617	return op.Output(0)
36618}
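
// Example: bitcasting one uint32 into four uint8 values, mirroring Example 2
// above (a hand-written sketch; graph and session boilerplate as in the
// HistogramFixedWidth example above):
//
//	s := op.NewScope()
//	x := op.Const(s, []uint32{0xffffffff})
//	y := op.Bitcast(s, x, tf.Uint8)
//	// After running the graph, y evaluates to [[255 255 255 255]], shape (1, 4):
//	// the rightmost dimension grows by sizeof(uint32)/sizeof(uint8) = 4.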
36619
36620// Creates a dataset that uses a custom thread pool to compute `input_dataset`.
36621//
36622// Arguments:
36623//
36624//	thread_pool: A resource produced by the ThreadPoolHandle op.
36625//
36626//
36627func ThreadPoolDataset(scope *Scope, input_dataset tf.Output, thread_pool tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
36628	if scope.Err() != nil {
36629		return
36630	}
36631	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
36632	opspec := tf.OpSpec{
36633		Type: "ThreadPoolDataset",
36634		Input: []tf.Input{
36635			input_dataset, thread_pool,
36636		},
36637		Attrs: attrs,
36638	}
36639	op := scope.AddOperation(opspec)
36640	return op.Output(0)
36641}
36642
36643// ResourceApplyAdagradDAAttr is an optional argument to ResourceApplyAdagradDA.
36644type ResourceApplyAdagradDAAttr func(optionalAttr)
36645
36646// ResourceApplyAdagradDAUseLocking sets the optional use_locking attribute to value.
36647//
36648// value: If True, updating of the var and accum tensors will be protected by
36649// a lock; otherwise the behavior is undefined, but may exhibit less contention.
36650// If not specified, defaults to false
36651func ResourceApplyAdagradDAUseLocking(value bool) ResourceApplyAdagradDAAttr {
36652	return func(m optionalAttr) {
36653		m["use_locking"] = value
36654	}
36655}
36656
36657// Update '*var' according to the proximal adagrad scheme.
36658//
36659// Arguments:
36660//	var_: Should be from a Variable().
36661//	gradient_accumulator: Should be from a Variable().
36662//	gradient_squared_accumulator: Should be from a Variable().
36663//	grad: The gradient.
36664//	lr: Scaling factor. Must be a scalar.
36665//	l1: L1 regularization. Must be a scalar.
36666//	l2: L2 regularization. Must be a scalar.
36667//	global_step: Training step number. Must be a scalar.
36668//
36669// Returns the created operation.
36670func ResourceApplyAdagradDA(scope *Scope, var_ tf.Output, gradient_accumulator tf.Output, gradient_squared_accumulator tf.Output, grad tf.Output, lr tf.Output, l1 tf.Output, l2 tf.Output, global_step tf.Output, optional ...ResourceApplyAdagradDAAttr) (o *tf.Operation) {
36671	if scope.Err() != nil {
36672		return
36673	}
36674	attrs := map[string]interface{}{}
36675	for _, a := range optional {
36676		a(attrs)
36677	}
36678	opspec := tf.OpSpec{
36679		Type: "ResourceApplyAdagradDA",
36680		Input: []tf.Input{
36681			var_, gradient_accumulator, gradient_squared_accumulator, grad, lr, l1, l2, global_step,
36682		},
36683		Attrs: attrs,
36684	}
36685	return scope.AddOperation(opspec)
36686}
36687
36688// SparseToDenseAttr is an optional argument to SparseToDense.
36689type SparseToDenseAttr func(optionalAttr)
36690
36691// SparseToDenseValidateIndices sets the optional validate_indices attribute to value.
36692//
36693// value: If true, indices are checked to make sure they are sorted in
36694// lexicographic order and that there are no repeats.
36695// If not specified, defaults to true
36696func SparseToDenseValidateIndices(value bool) SparseToDenseAttr {
36697	return func(m optionalAttr) {
36698		m["validate_indices"] = value
36699	}
36700}
36701
36702// Converts a sparse representation into a dense tensor.
36703//
36704// Builds an array `dense` with shape `output_shape` such that
36705//
36706// ```
36707// # If sparse_indices is scalar
36708// dense[i] = (i == sparse_indices ? sparse_values : default_value)
36709//
36710// # If sparse_indices is a vector, then for each i
36711// dense[sparse_indices[i]] = sparse_values[i]
36712//
36713// # If sparse_indices is an n by d matrix, then for each i in [0, n)
36714// dense[sparse_indices[i][0], ..., sparse_indices[i][d-1]] = sparse_values[i]
36715// ```
36716//
36717// All other values in `dense` are set to `default_value`.  If `sparse_values` is a
36718// scalar, all sparse indices are set to this single value.
36719//
36720// Indices should be sorted in lexicographic order, and indices must not
36721// contain any repeats. If `validate_indices` is true, these properties
36722// are checked during execution.
36723//
36724// Arguments:
36725//	sparse_indices: 0-D, 1-D, or 2-D.  `sparse_indices[i]` contains the complete
36726// index where `sparse_values[i]` will be placed.
36727//	output_shape: 1-D.  Shape of the dense output tensor.
36728//	sparse_values: 1-D.  Values corresponding to each row of `sparse_indices`,
36729// or a scalar value to be used for all sparse indices.
36730//	default_value: Scalar value to set for indices not specified in
36731// `sparse_indices`.
36732//
36733// Returns Dense output tensor of shape `output_shape`.
36734func SparseToDense(scope *Scope, sparse_indices tf.Output, output_shape tf.Output, sparse_values tf.Output, default_value tf.Output, optional ...SparseToDenseAttr) (dense tf.Output) {
36735	if scope.Err() != nil {
36736		return
36737	}
36738	attrs := map[string]interface{}{}
36739	for _, a := range optional {
36740		a(attrs)
36741	}
36742	opspec := tf.OpSpec{
36743		Type: "SparseToDense",
36744		Input: []tf.Input{
36745			sparse_indices, output_shape, sparse_values, default_value,
36746		},
36747		Attrs: attrs,
36748	}
36749	op := scope.AddOperation(opspec)
36750	return op.Output(0)
36751}
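
// Example: scattering two values into a dense vector of length 4 (a
// hand-written sketch; boilerplate as in the HistogramFixedWidth example
// above):
//
//	s := op.NewScope()
//	indices := op.Const(s, [][]int64{{0}, {2}}) // 2 x 1 matrix of complete indices
//	outputShape := op.Const(s, []int64{4})
//	values := op.Const(s, []int32{7, 9})
//	defaultValue := op.Const(s, int32(0))
//	dense := op.SparseToDense(s, indices, outputShape, values, defaultValue)
//	// dense evaluates to [7 0 9 0].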
36752
36753// OrderedMapClearAttr is an optional argument to OrderedMapClear.
36754type OrderedMapClearAttr func(optionalAttr)
36755
36756// OrderedMapClearCapacity sets the optional capacity attribute to value.
36757// If not specified, defaults to 0
36758//
36759// REQUIRES: value >= 0
36760func OrderedMapClearCapacity(value int64) OrderedMapClearAttr {
36761	return func(m optionalAttr) {
36762		m["capacity"] = value
36763	}
36764}
36765
36766// OrderedMapClearMemoryLimit sets the optional memory_limit attribute to value.
36767// If not specified, defaults to 0
36768//
36769// REQUIRES: value >= 0
36770func OrderedMapClearMemoryLimit(value int64) OrderedMapClearAttr {
36771	return func(m optionalAttr) {
36772		m["memory_limit"] = value
36773	}
36774}
36775
36776// OrderedMapClearContainer sets the optional container attribute to value.
36777// If not specified, defaults to ""
36778func OrderedMapClearContainer(value string) OrderedMapClearAttr {
36779	return func(m optionalAttr) {
36780		m["container"] = value
36781	}
36782}
36783
36784// OrderedMapClearSharedName sets the optional shared_name attribute to value.
36785// If not specified, defaults to ""
36786func OrderedMapClearSharedName(value string) OrderedMapClearAttr {
36787	return func(m optionalAttr) {
36788		m["shared_name"] = value
36789	}
36790}
36791
36792// Op removes all elements in the underlying container.
36793//
36794// Returns the created operation.
36795func OrderedMapClear(scope *Scope, dtypes []tf.DataType, optional ...OrderedMapClearAttr) (o *tf.Operation) {
36796	if scope.Err() != nil {
36797		return
36798	}
36799	attrs := map[string]interface{}{"dtypes": dtypes}
36800	for _, a := range optional {
36801		a(attrs)
36802	}
36803	opspec := tf.OpSpec{
36804		Type: "OrderedMapClear",
36805
36806		Attrs: attrs,
36807	}
36808	return scope.AddOperation(opspec)
36809}
36810
36811// MaxPoolAttr is an optional argument to MaxPool.
36812type MaxPoolAttr func(optionalAttr)
36813
36814// MaxPoolExplicitPaddings sets the optional explicit_paddings attribute to value.
36815// If not specified, defaults to an empty list
36816func MaxPoolExplicitPaddings(value []int64) MaxPoolAttr {
36817	return func(m optionalAttr) {
36818		m["explicit_paddings"] = value
36819	}
36820}
36821
36822// MaxPoolDataFormat sets the optional data_format attribute to value.
36823//
36824// value: Specify the data format of the input and output data. With the
36825// default format "NHWC", the data is stored in the order of:
36826//     [batch, in_height, in_width, in_channels].
36827// Alternatively, the format could be "NCHW", the data storage order of:
36828//     [batch, in_channels, in_height, in_width].
36829// If not specified, defaults to "NHWC"
36830func MaxPoolDataFormat(value string) MaxPoolAttr {
36831	return func(m optionalAttr) {
36832		m["data_format"] = value
36833	}
36834}
36835
36836// Performs max pooling on the input.
36837//
36838// Arguments:
36839//	input: 4-D input to pool over.
36840//	ksize: The size of the window for each dimension of the input tensor.
36841//	strides: The stride of the sliding window for each dimension of the
36842// input tensor.
36843//	padding: The type of padding algorithm to use.
36844//
36845// Returns The max pooled output tensor.
36846func MaxPool(scope *Scope, input tf.Output, ksize []int64, strides []int64, padding string, optional ...MaxPoolAttr) (output tf.Output) {
36847	if scope.Err() != nil {
36848		return
36849	}
36850	attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
36851	for _, a := range optional {
36852		a(attrs)
36853	}
36854	opspec := tf.OpSpec{
36855		Type: "MaxPool",
36856		Input: []tf.Input{
36857			input,
36858		},
36859		Attrs: attrs,
36860	}
36861	op := scope.AddOperation(opspec)
36862	return op.Output(0)
36863}
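
// Example: 2x2, stride-2 max pooling over a 1x2x2x1 NHWC input (a
// hand-written sketch; boilerplate as in the HistogramFixedWidth example
// above):
//
//	s := op.NewScope()
//	img := op.Const(s, [][][][]float32{{{{1}, {2}}, {{3}, {4}}}}) // [batch=1, h=2, w=2, c=1]
//	pooled := op.MaxPool(s, img, []int64{1, 2, 2, 1}, []int64{1, 2, 2, 1}, "VALID")
//	// pooled evaluates to [[[[4]]]]: the maximum of the single 2x2 window.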
36864
36865// BlockLSTMAttr is an optional argument to BlockLSTM.
36866type BlockLSTMAttr func(optionalAttr)
36867
36868// BlockLSTMForgetBias sets the optional forget_bias attribute to value.
36869//
36870// value: The forget gate bias.
36871// If not specified, defaults to 1
36872func BlockLSTMForgetBias(value float32) BlockLSTMAttr {
36873	return func(m optionalAttr) {
36874		m["forget_bias"] = value
36875	}
36876}
36877
36878// BlockLSTMCellClip sets the optional cell_clip attribute to value.
36879//
36880// value: Value to clip the 'cs' value to.
36881// If not specified, defaults to 3
36882func BlockLSTMCellClip(value float32) BlockLSTMAttr {
36883	return func(m optionalAttr) {
36884		m["cell_clip"] = value
36885	}
36886}
36887
36888// BlockLSTMUsePeephole sets the optional use_peephole attribute to value.
36889//
36890// value: Whether to use peephole weights.
36891// If not specified, defaults to false
36892func BlockLSTMUsePeephole(value bool) BlockLSTMAttr {
36893	return func(m optionalAttr) {
36894		m["use_peephole"] = value
36895	}
36896}
36897
36898// Computes the LSTM cell forward propagation for all the time steps.
36899//
36900// This is equivalent to applying LSTMBlockCell in a loop, like so:
36901//
36902// ```python
36903// for x1 in unpack(x):
36904//   i1, cs1, f1, o1, ci1, co1, h1 = LSTMBlockCell(
36905//     x1, cs_prev, h_prev, w, wci, wcf, wco, b)
36906//   cs_prev = cs1
36907//   h_prev = h1
36908//   i.append(i1)
36909//   cs.append(cs1)
36910//   f.append(f1)
36911//   o.append(o1)
36912//   ci.append(ci1)
36913//   co.append(co1)
36914//   h.append(h1)
36915// return pack(i), pack(cs), pack(f), pack(o), pack(ci), pack(co), pack(h)
36916// ```
36917//
36918// Arguments:
36919//	seq_len_max: Maximum time length actually used by this input. Outputs are padded
36920// with zeros beyond this length.
36921//	x: The sequence input to the LSTM, shape (timelen, batch_size, num_inputs).
36922//	cs_prev: Value of the initial cell state.
36923//	h_prev: Initial output of cell (to be used for peephole).
36924//	w: The weight matrix.
36925//	wci: The weight matrix for input gate peephole connection.
36926//	wcf: The weight matrix for forget gate peephole connection.
36927//	wco: The weight matrix for output gate peephole connection.
36928//	b: The bias vector.
36929//
36930// Returns:
36931//	i: The input gate over the whole time sequence.
36932//	cs: The cell state before the tanh over the whole time sequence.
36933//	f: The forget gate over the whole time sequence.
36934//	o: The output gate over the whole time sequence.
36935//	ci: The cell input over the whole time sequence.
36936//	co: The cell after the tanh over the whole time sequence.
36937//	h: The output h vector over the whole time sequence.
36938func BlockLSTM(scope *Scope, seq_len_max tf.Output, x tf.Output, cs_prev tf.Output, h_prev tf.Output, w tf.Output, wci tf.Output, wcf tf.Output, wco tf.Output, b tf.Output, optional ...BlockLSTMAttr) (i tf.Output, cs tf.Output, f tf.Output, o tf.Output, ci tf.Output, co tf.Output, h tf.Output) {
36939	if scope.Err() != nil {
36940		return
36941	}
36942	attrs := map[string]interface{}{}
36943	for _, a := range optional {
36944		a(attrs)
36945	}
36946	opspec := tf.OpSpec{
36947		Type: "BlockLSTM",
36948		Input: []tf.Input{
36949			seq_len_max, x, cs_prev, h_prev, w, wci, wcf, wco, b,
36950		},
36951		Attrs: attrs,
36952	}
36953	op := scope.AddOperation(opspec)
36954	return op.Output(0), op.Output(1), op.Output(2), op.Output(3), op.Output(4), op.Output(5), op.Output(6)
36955}
36956
36957// Computes the GRU cell forward propagation for 1 time step.
36958//
36959// Arguments:
36960//	x: Input to the GRU cell.
36961//	h_prev: State input from the previous GRU cell.
36962//	w_ru: Weight matrix for the reset and update gate.
36963//	w_c: Weight matrix for the cell connection gate.
36964//	b_ru: Bias vector for the reset and update gate.
36965//	b_c: Bias vector for the cell connection gate.
36966//
36967// Returns:
36968//	r: Output of the reset gate.
36969//	u: Output of the update gate.
36970//	c: Output of the cell connection gate.
36971//	h: Current state of the GRU cell.
36972//
36973// Note on notation of the variables:
36974//
36975// Concatenation of a and b is represented by a_b.
36976// The element-wise product of a and b is represented by ab or,
36977// equivalently, by a \circ b.
36978// Matrix multiplication is represented by *.
36979//
36980// Biases are initialized with :
36981// `b_ru` - constant_initializer(1.0)
36982// `b_c` - constant_initializer(0.0)
36983//
36984// This kernel op implements the following mathematical equations:
36985//
36986// ```
36987// x_h_prev = [x, h_prev]
36988//
36989// [r_bar u_bar] = x_h_prev * w_ru + b_ru
36990//
36991// r = sigmoid(r_bar)
36992// u = sigmoid(u_bar)
36993//
36994// h_prevr = h_prev \circ r
36995//
36996// x_h_prevr = [x h_prevr]
36997//
36998// c_bar = x_h_prevr * w_c + b_c
36999// c = tanh(c_bar)
37000//
37001// h = (1-u) \circ c + u \circ h_prev
37002// ```
37003func GRUBlockCell(scope *Scope, x tf.Output, h_prev tf.Output, w_ru tf.Output, w_c tf.Output, b_ru tf.Output, b_c tf.Output) (r tf.Output, u tf.Output, c tf.Output, h tf.Output) {
37004	if scope.Err() != nil {
37005		return
37006	}
37007	opspec := tf.OpSpec{
37008		Type: "GRUBlockCell",
37009		Input: []tf.Input{
37010			x, h_prev, w_ru, w_c, b_ru, b_c,
37011		},
37012	}
37013	op := scope.AddOperation(opspec)
37014	return op.Output(0), op.Output(1), op.Output(2), op.Output(3)
37015}
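
// Example: a single GRU step with batch_size=1, input_size=1, cell_size=1,
// following the equations above (a hand-written sketch with made-up weights;
// boilerplate as in the HistogramFixedWidth example above):
//
//	s := op.NewScope()
//	x := op.Const(s, [][]float32{{1}})                      // [batch, input_size]
//	hPrev := op.Const(s, [][]float32{{0}})                  // [batch, cell_size]
//	wRu := op.Const(s, [][]float32{{0.5, 0.5}, {0.5, 0.5}}) // [input_size+cell_size, 2*cell_size]
//	wC := op.Const(s, [][]float32{{1}, {1}})                // [input_size+cell_size, cell_size]
//	bRu := op.Const(s, []float32{1, 1})                     // constant_initializer(1.0)
//	bC := op.Const(s, []float32{0})                         // constant_initializer(0.0)
//	r, u, c, h := op.GRUBlockCell(s, x, hPrev, wRu, wC, bRu, bC)
//	_, _, _, _ = r, u, c, h // fetch any of these with Session.Run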
37016
37017// Writes the given dataset to the given file using the TFRecord format.
37018//
37019// Arguments:
37020//	input_dataset: A variant tensor representing the dataset to write.
37021//	filename: A scalar string tensor representing the filename to use.
37022//	compression_type: A scalar string tensor containing either (i) the empty string (no
37023// compression), (ii) "ZLIB", or (iii) "GZIP".
37024//
37025// Returns the created operation.
37026func DatasetToTFRecord(scope *Scope, input_dataset tf.Output, filename tf.Output, compression_type tf.Output) (o *tf.Operation) {
37027	if scope.Err() != nil {
37028		return
37029	}
37030	opspec := tf.OpSpec{
37031		Type: "DatasetToTFRecord",
37032		Input: []tf.Input{
37033			input_dataset, filename, compression_type,
37034		},
37035	}
37036	return scope.AddOperation(opspec)
37037}
37038
37039// ResourceSparseApplyCenteredRMSPropAttr is an optional argument to ResourceSparseApplyCenteredRMSProp.
37040type ResourceSparseApplyCenteredRMSPropAttr func(optionalAttr)
37041
37042// ResourceSparseApplyCenteredRMSPropUseLocking sets the optional use_locking attribute to value.
37043//
37044// value: If `True`, updating of the var, mg, ms, and mom tensors is
37045// protected by a lock; otherwise the behavior is undefined, but may exhibit less
37046// contention.
37047// If not specified, defaults to false
37048func ResourceSparseApplyCenteredRMSPropUseLocking(value bool) ResourceSparseApplyCenteredRMSPropAttr {
37049	return func(m optionalAttr) {
37050		m["use_locking"] = value
37051	}
37052}
37053
37054// Update '*var' according to the centered RMSProp algorithm.
37055//
37056// The centered RMSProp algorithm uses an estimate of the centered second moment
37057// (i.e., the variance) for normalization, as opposed to regular RMSProp, which
37058// uses the (uncentered) second moment. This often helps with training, but is
37059// slightly more expensive in terms of computation and memory.
37060//
37061// Note that in dense implementation of this algorithm, mg, ms, and mom will
37062// update even if the grad is zero, but in this sparse implementation, mg, ms,
37063// and mom will not update in iterations during which the grad is zero.
37064//
37065// mean_square = decay * mean_square + (1-decay) * gradient ** 2
37066// mean_grad = decay * mean_grad + (1-decay) * gradient
37067// Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2)
37068//
37069// ms <- rho * ms_{t-1} + (1-rho) * grad * grad
37070// mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)
37071// var <- var - mom
37072//
37073// Arguments:
37074//	var_: Should be from a Variable().
37075//	mg: Should be from a Variable().
37076//	ms: Should be from a Variable().
37077//	mom: Should be from a Variable().
37078//	lr: Scaling factor. Must be a scalar.
37079//	rho: Decay rate. Must be a scalar.
37080//	momentum: Momentum scale. Must be a scalar.
37081//	epsilon: Ridge term. Must be a scalar.
37082//	grad: The gradient.
37083//	indices: A vector of indices into the first dimension of var, ms and mom.
37084//
37085// Returns the created operation.
37086func ResourceSparseApplyCenteredRMSProp(scope *Scope, var_ tf.Output, mg tf.Output, ms tf.Output, mom tf.Output, lr tf.Output, rho tf.Output, momentum tf.Output, epsilon tf.Output, grad tf.Output, indices tf.Output, optional ...ResourceSparseApplyCenteredRMSPropAttr) (o *tf.Operation) {
37087	if scope.Err() != nil {
37088		return
37089	}
37090	attrs := map[string]interface{}{}
37091	for _, a := range optional {
37092		a(attrs)
37093	}
37094	opspec := tf.OpSpec{
37095		Type: "ResourceSparseApplyCenteredRMSProp",
37096		Input: []tf.Input{
37097			var_, mg, ms, mom, lr, rho, momentum, epsilon, grad, indices,
37098		},
37099		Attrs: attrs,
37100	}
37101	return scope.AddOperation(opspec)
37102}
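
// A plain-Go transcription of one centered-RMSProp step for a single scalar
// slot, following the equations above (a hand-written illustration; the
// function and variable names are made up):
//
//	import "math"
//
//	func centeredRMSPropStep(v, mg, ms, mom, lr, rho, momentum, epsilon, grad float64) (float64, float64, float64, float64) {
//		ms = rho*ms + (1-rho)*grad*grad            // mean_square
//		mg = rho*mg + (1-rho)*grad                 // mean_grad
//		mom = momentum*mom + lr*grad/math.Sqrt(ms-mg*mg+epsilon)
//		v -= mom
//		return v, mg, ms, mom
//	}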
37103
37104// Elementwise computes the bitwise XOR of `x` and `y`.
37105//
37106// The result has a bit set wherever the corresponding bits of `x` and `y` differ.
37107// The computation is performed on the underlying representations of `x` and `y`.
37108//
37109// For example:
37110//
37111// ```python
37112// import tensorflow as tf
37113// from tensorflow.python.ops import bitwise_ops
37114// dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64,
37115//               tf.uint8, tf.uint16, tf.uint32, tf.uint64]
37116//
37117// for dtype in dtype_list:
37118//   lhs = tf.constant([0, 5, 3, 14], dtype=dtype)
37119//   rhs = tf.constant([5, 0, 7, 11], dtype=dtype)
37120//   exp = tf.constant([5, 5, 4, 5],  dtype=tf.float32)
37121//
37122//   res = bitwise_ops.bitwise_xor(lhs, rhs)
37123//   tf.assert_equal(tf.cast(res, tf.float32), exp) # TRUE
37124// ```
37125//
37126func BitwiseXor(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
37127	if scope.Err() != nil {
37128		return
37129	}
37130	opspec := tf.OpSpec{
37131		Type: "BitwiseXor",
37132		Input: []tf.Input{
37133			x, y,
37134		},
37135	}
37136	op := scope.AddOperation(opspec)
37137	return op.Output(0)
37138}
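
// Example: the XOR computation from the Python snippet above, in Go (a
// hand-written sketch; boilerplate as in the HistogramFixedWidth example
// above):
//
//	s := op.NewScope()
//	lhs := op.Const(s, []int32{0, 5, 3, 14})
//	rhs := op.Const(s, []int32{5, 0, 7, 11})
//	z := op.BitwiseXor(s, lhs, rhs)
//	// z evaluates to [5 5 4 5].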
37139
37140// Adds two `SparseTensor` objects to produce another `SparseTensor`.
37141//
37142// The input `SparseTensor` objects' indices are assumed ordered in standard
37143// lexicographic order.  If this is not the case, before this step run
37144// `SparseReorder` to restore index ordering.
37145//
37146// By default, if two values sum to zero at some index, the output `SparseTensor`
37147// would still include that particular location in its index, storing a zero in the
37148// corresponding value slot.  To override this, callers can specify `thresh`,
37149// indicating that if the sum has a magnitude strictly smaller than `thresh`, its
37150// corresponding value and index would then not be included.  In particular,
37151// `thresh == 0` (default) means everything is kept, and actual thresholding
37152// happens only for a positive `thresh`.
37153//
37154// In the following shapes, `nnz` is the count after taking `thresh` into account.
37155//
37156// Arguments:
37157//	a_indices: 2-D.  The `indices` of the first `SparseTensor`, size `[nnz, ndims]` Matrix.
37158//	a_values: 1-D.  The `values` of the first `SparseTensor`, size `[nnz]` Vector.
37159//	a_shape: 1-D.  The `shape` of the first `SparseTensor`, size `[ndims]` Vector.
37160//	b_indices: 2-D.  The `indices` of the second `SparseTensor`, size `[nnz, ndims]` Matrix.
37161//	b_values: 1-D.  The `values` of the second `SparseTensor`, size `[nnz]` Vector.
37162//	b_shape: 1-D.  The `shape` of the second `SparseTensor`, size `[ndims]` Vector.
37163//	thresh: 0-D.  The magnitude threshold that determines if an output value/index
37164// pair takes space.
37165func SparseAdd(scope *Scope, a_indices tf.Output, a_values tf.Output, a_shape tf.Output, b_indices tf.Output, b_values tf.Output, b_shape tf.Output, thresh tf.Output) (sum_indices tf.Output, sum_values tf.Output, sum_shape tf.Output) {
37166	if scope.Err() != nil {
37167		return
37168	}
37169	opspec := tf.OpSpec{
37170		Type: "SparseAdd",
37171		Input: []tf.Input{
37172			a_indices, a_values, a_shape, b_indices, b_values, b_shape, thresh,
37173		},
37174	}
37175	op := scope.AddOperation(opspec)
37176	return op.Output(0), op.Output(1), op.Output(2)
37177}
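
// Example: adding two 1-D SparseTensors of dense shape [3] (a hand-written
// sketch; boilerplate as in the HistogramFixedWidth example above). A holds
// 1.0 at index 0 and B holds 2.0 at index 1:
//
//	s := op.NewScope()
//	aIdx, aVal, aShape := op.Const(s, [][]int64{{0}}), op.Const(s, []float32{1}), op.Const(s, []int64{3})
//	bIdx, bVal, bShape := op.Const(s, [][]int64{{1}}), op.Const(s, []float32{2}), op.Const(s, []int64{3})
//	thresh := op.Const(s, float32(0))
//	sumIdx, sumVal, sumShape := op.SparseAdd(s, aIdx, aVal, aShape, bIdx, bVal, bShape, thresh)
//	// sumIdx => [[0] [1]], sumVal => [1 2], sumShape => [3]
//	_, _, _ = sumIdx, sumVal, sumShape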
37178
37179// Selects elements from `x` or `y`, depending on `condition`.
37180//
37181// The `x` and `y` tensors must have the same shape, and the
37182// output will also have that shape.
37183//
37184// The `condition` tensor must be a scalar if `x` and `y` are scalars.
37185// If `x` and `y` are vectors or higher rank, then `condition` must be either a
37186// scalar, a vector with size matching the first dimension of `x`, or must have
37187// the same shape as `x`.
37188//
37189// The `condition` tensor acts as a mask that chooses, based on the value at each
37190// element, whether the corresponding element / row in the output should be
37191// taken from `x` (if true) or `y` (if false).
37192//
37193// If `condition` is a vector and `x` and `y` are higher rank matrices, then
37194// it chooses which row (outer dimension) to copy from `x` and `y`.
37195// If `condition` has the same shape as `x` and `y`, then it chooses which
37196// element to copy from `x` and `y`.
37197//
37198// For example:
37199//
37200// ```python
37201// # 'condition' tensor is [[True,  False]
37202// #                        [False, True]]
37203// # 't' is [[1, 2],
37204// #         [3, 4]]
37205// # 'e' is [[5, 6],
37206// #         [7, 8]]
37207// select(condition, t, e)  # => [[1, 6], [7, 4]]
37208//
37209//
37210// # 'condition' tensor is [True, False]
37211// # 't' is [[1, 2],
37212// #         [3, 4]]
37213// # 'e' is [[5, 6],
37214// #         [7, 8]]
37215// select(condition, t, e) ==> [[1, 2],
37216//                              [7, 8]]
37217//
37218// ```
37219//
37220// Arguments:
37221//
37222//	x: A `Tensor` which may have the same shape as `condition`.
37223// If `condition` is rank 1, `x` may have higher rank,
37224// but its first dimension must match the size of `condition`.
37225//	y: = A `Tensor` with the same type and shape as `x`.
37226//
37227// Returns A `Tensor` with the same type and shape as `x` and `y`.
37228func Select(scope *Scope, condition tf.Output, x tf.Output, y tf.Output) (output tf.Output) {
37229	if scope.Err() != nil {
37230		return
37231	}
37232	opspec := tf.OpSpec{
37233		Type: "Select",
37234		Input: []tf.Input{
37235			condition, x, y,
37236		},
37237	}
37238	op := scope.AddOperation(opspec)
37239	return op.Output(0)
37240}
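
// Example: the second Python snippet above expressed in Go (a hand-written
// sketch; boilerplate as in the HistogramFixedWidth example above):
//
//	s := op.NewScope()
//	cond := op.Const(s, []bool{true, false})
//	t := op.Const(s, [][]int32{{1, 2}, {3, 4}})
//	e := op.Const(s, [][]int32{{5, 6}, {7, 8}})
//	out := op.Select(s, cond, t, e)
//	// out evaluates to [[1 2] [7 8]]: row 0 comes from t, row 1 from e.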
37241
37242// The gradient operator for the SparseAdd op.
37243//
37244// The SparseAdd op calculates A + B, where A, B, and the sum are all represented
37245// as `SparseTensor` objects.  This op takes in the upstream gradient w.r.t.
37246// non-empty values of the sum, and outputs the gradients w.r.t. the non-empty
37247// values of A and B.
37248//
37249// Arguments:
37250//	backprop_val_grad: 1-D with shape `[nnz(sum)]`.  The gradient with respect to
37251// the non-empty values of the sum.
37252//	a_indices: 2-D.  The `indices` of the `SparseTensor` A, size `[nnz(A), ndims]`.
37253//	b_indices: 2-D.  The `indices` of the `SparseTensor` B, size `[nnz(B), ndims]`.
37254//	sum_indices: 2-D.  The `indices` of the sum `SparseTensor`, size
37255// `[nnz(sum), ndims]`.
37256//
37257// Returns:
37258//	a_val_grad: 1-D with shape `[nnz(A)]`. The gradient with respect to the
37259// non-empty values of A.
37260//	b_val_grad: 1-D with shape `[nnz(B)]`. The gradient with respect to the
37261// non-empty values of B.
37262func SparseAddGrad(scope *Scope, backprop_val_grad tf.Output, a_indices tf.Output, b_indices tf.Output, sum_indices tf.Output) (a_val_grad tf.Output, b_val_grad tf.Output) {
37263	if scope.Err() != nil {
37264		return
37265	}
37266	opspec := tf.OpSpec{
37267		Type: "SparseAddGrad",
37268		Input: []tf.Input{
37269			backprop_val_grad, a_indices, b_indices, sum_indices,
37270		},
37271	}
37272	op := scope.AddOperation(opspec)
37273	return op.Output(0), op.Output(1)
37274}
37275
37276// StridedSliceGradAttr is an optional argument to StridedSliceGrad.
37277type StridedSliceGradAttr func(optionalAttr)
37278
37279// StridedSliceGradBeginMask sets the optional begin_mask attribute to value.
37280// If not specified, defaults to 0
37281func StridedSliceGradBeginMask(value int64) StridedSliceGradAttr {
37282	return func(m optionalAttr) {
37283		m["begin_mask"] = value
37284	}
37285}
37286
37287// StridedSliceGradEndMask sets the optional end_mask attribute to value.
37288// If not specified, defaults to 0
37289func StridedSliceGradEndMask(value int64) StridedSliceGradAttr {
37290	return func(m optionalAttr) {
37291		m["end_mask"] = value
37292	}
37293}
37294
37295// StridedSliceGradEllipsisMask sets the optional ellipsis_mask attribute to value.
37296// If not specified, defaults to 0
37297func StridedSliceGradEllipsisMask(value int64) StridedSliceGradAttr {
37298	return func(m optionalAttr) {
37299		m["ellipsis_mask"] = value
37300	}
37301}
37302
37303// StridedSliceGradNewAxisMask sets the optional new_axis_mask attribute to value.
37304// If not specified, defaults to 0
37305func StridedSliceGradNewAxisMask(value int64) StridedSliceGradAttr {
37306	return func(m optionalAttr) {
37307		m["new_axis_mask"] = value
37308	}
37309}
37310
37311// StridedSliceGradShrinkAxisMask sets the optional shrink_axis_mask attribute to value.
37312// If not specified, defaults to 0
37313func StridedSliceGradShrinkAxisMask(value int64) StridedSliceGradAttr {
37314	return func(m optionalAttr) {
37315		m["shrink_axis_mask"] = value
37316	}
37317}
37318
37319// Returns the gradient of `StridedSlice`.
37320//
37321// Since `StridedSlice` cuts out pieces of its `input`, which has size
37322// `shape`, its gradient will have the same shape (which is passed here
37323// as `shape`). The gradient will be zero in any element that the slice
37324// does not select.
37325//
37326// Arguments are the same as StridedSlice's, with the exception that
37327// `dy` is the input gradient to be propagated and `shape` is the
37328// shape of `StridedSlice`'s `input`.
37329func StridedSliceGrad(scope *Scope, shape tf.Output, begin tf.Output, end tf.Output, strides tf.Output, dy tf.Output, optional ...StridedSliceGradAttr) (output tf.Output) {
37330	if scope.Err() != nil {
37331		return
37332	}
37333	attrs := map[string]interface{}{}
37334	for _, a := range optional {
37335		a(attrs)
37336	}
37337	opspec := tf.OpSpec{
37338		Type: "StridedSliceGrad",
37339		Input: []tf.Input{
37340			shape, begin, end, strides, dy,
37341		},
37342		Attrs: attrs,
37343	}
37344	op := scope.AddOperation(opspec)
37345	return op.Output(0)
37346}
37347
37348// ResourceApplyFtrlAttr is an optional argument to ResourceApplyFtrl.
37349type ResourceApplyFtrlAttr func(optionalAttr)
37350
37351// ResourceApplyFtrlUseLocking sets the optional use_locking attribute to value.
37352//
37353// value: If `True`, updating of the var and accum tensors will be protected
37354// by a lock; otherwise the behavior is undefined, but may exhibit less
37355// contention.
37356// If not specified, defaults to false
37357func ResourceApplyFtrlUseLocking(value bool) ResourceApplyFtrlAttr {
37358	return func(m optionalAttr) {
37359		m["use_locking"] = value
37360	}
37361}
37362
37363// ResourceApplyFtrlMultiplyLinearByLr sets the optional multiply_linear_by_lr attribute to value.
37364// If not specified, defaults to false
37365func ResourceApplyFtrlMultiplyLinearByLr(value bool) ResourceApplyFtrlAttr {
37366	return func(m optionalAttr) {
37367		m["multiply_linear_by_lr"] = value
37368	}
37369}
37370
37371// Update '*var' according to the Ftrl-proximal scheme.
37372//
37373// accum_new = accum + grad * grad
37374// linear += grad - (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
37375// quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
37376// var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
37377// accum = accum_new
37378//
37379// Arguments:
37380//	var_: Should be from a Variable().
37381//	accum: Should be from a Variable().
37382//	linear: Should be from a Variable().
37383//	grad: The gradient.
37384//	lr: Scaling factor. Must be a scalar.
37385//	l1: L1 regularization. Must be a scalar.
37386//	l2: L2 regularization. Must be a scalar.
37387//	lr_power: Scaling factor. Must be a scalar.
37388//
37389// Returns the created operation.
37390func ResourceApplyFtrl(scope *Scope, var_ tf.Output, accum tf.Output, linear tf.Output, grad tf.Output, lr tf.Output, l1 tf.Output, l2 tf.Output, lr_power tf.Output, optional ...ResourceApplyFtrlAttr) (o *tf.Operation) {
37391	if scope.Err() != nil {
37392		return
37393	}
37394	attrs := map[string]interface{}{}
37395	for _, a := range optional {
37396		a(attrs)
37397	}
37398	opspec := tf.OpSpec{
37399		Type: "ResourceApplyFtrl",
37400		Input: []tf.Input{
37401			var_, accum, linear, grad, lr, l1, l2, lr_power,
37402		},
37403		Attrs: attrs,
37404	}
37405	return scope.AddOperation(opspec)
37406}
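
// A plain-Go transcription of one Ftrl-proximal update step for a single
// scalar weight, following the equations above (a hand-written illustration;
// the function and variable names are made up):
//
//	import "math"
//
//	func ftrlStep(v, accum, linear, grad, lr, l1, l2, lrPower float64) (newV, newAccum, newLinear float64) {
//		accumNew := accum + grad*grad
//		linear += grad - (math.Pow(accumNew, -lrPower)-math.Pow(accum, -lrPower))/lr*v
//		quadratic := 1.0/(math.Pow(accumNew, lrPower)*lr) + 2*l2
//		if math.Abs(linear) > l1 {
//			v = (math.Copysign(l1, linear) - linear) / quadratic // sign(linear)*l1 - linear
//		} else {
//			v = 0
//		}
//		return v, accumNew, linear
//	}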
37407
37408// Creates a dataset that contains the unique elements of `input_dataset`.
37409func ExperimentalUniqueDataset(scope *Scope, input_dataset tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
37410	if scope.Err() != nil {
37411		return
37412	}
37413	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
37414	opspec := tf.OpSpec{
37415		Type: "ExperimentalUniqueDataset",
37416		Input: []tf.Input{
37417			input_dataset,
37418		},
37419		Attrs: attrs,
37420	}
37421	op := scope.AddOperation(opspec)
37422	return op.Output(0)
37423}
37424
37425// StringFormatAttr is an optional argument to StringFormat.
37426type StringFormatAttr func(optionalAttr)
37427
37428// StringFormatTemplate sets the optional template attribute to value.
37429//
37430// value: A string, the template to format tensor summaries into.
37431// If not specified, defaults to "%s"
37432func StringFormatTemplate(value string) StringFormatAttr {
37433	return func(m optionalAttr) {
37434		m["template"] = value
37435	}
37436}
37437
37438// StringFormatPlaceholder sets the optional placeholder attribute to value.
37439//
37440// value: A string, at each placeholder in the template a subsequent tensor summary will be inserted.
37441// If not specified, defaults to "%s"
37442func StringFormatPlaceholder(value string) StringFormatAttr {
37443	return func(m optionalAttr) {
37444		m["placeholder"] = value
37445	}
37446}
37447
37448// StringFormatSummarize sets the optional summarize attribute to value.
37449//
37450// value: When formatting the tensor summaries, print the first and last `summarize` entries of each tensor dimension.
37451// If not specified, defaults to 3
37452func StringFormatSummarize(value int64) StringFormatAttr {
37453	return func(m optionalAttr) {
37454		m["summarize"] = value
37455	}
37456}
37457
37458// Formats a string template using a list of tensors.
37459//
37460// Formats a string template using a list of tensors, pretty-printing tensor summaries.
37461//
37462// Arguments:
37463//	inputs: The list of tensors to format into the placeholder string.
37464//
37465// Returns The resulting string scalar.
37466func StringFormat(scope *Scope, inputs []tf.Output, optional ...StringFormatAttr) (output tf.Output) {
37467	if scope.Err() != nil {
37468		return
37469	}
37470	attrs := map[string]interface{}{}
37471	for _, a := range optional {
37472		a(attrs)
37473	}
37474	opspec := tf.OpSpec{
37475		Type: "StringFormat",
37476		Input: []tf.Input{
37477			tf.OutputList(inputs),
37478		},
37479		Attrs: attrs,
37480	}
37481	op := scope.AddOperation(opspec)
37482	return op.Output(0)
37483}
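
// Example: formatting a tensor summary into a template string (a hand-written
// sketch; boilerplate as in the HistogramFixedWidth example above):
//
//	s := op.NewScope()
//	x := op.Const(s, []int32{1, 2, 3})
//	msg := op.StringFormat(s, []tf.Output{x},
//		op.StringFormatTemplate("x = %s"),
//		op.StringFormatPlaceholder("%s"))
//	// msg evaluates to the scalar string "x = [1 2 3]".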
37484
37485// CudnnRNNAttr is an optional argument to CudnnRNN.
37486type CudnnRNNAttr func(optionalAttr)
37487
37488// CudnnRNNRnnMode sets the optional rnn_mode attribute to value.
37489// If not specified, defaults to "lstm"
37490func CudnnRNNRnnMode(value string) CudnnRNNAttr {
37491	return func(m optionalAttr) {
37492		m["rnn_mode"] = value
37493	}
37494}
37495
37496// CudnnRNNInputMode sets the optional input_mode attribute to value.
37497// If not specified, defaults to "linear_input"
37498func CudnnRNNInputMode(value string) CudnnRNNAttr {
37499	return func(m optionalAttr) {
37500		m["input_mode"] = value
37501	}
37502}
37503
37504// CudnnRNNDirection sets the optional direction attribute to value.
37505// If not specified, defaults to "unidirectional"
37506func CudnnRNNDirection(value string) CudnnRNNAttr {
37507	return func(m optionalAttr) {
37508		m["direction"] = value
37509	}
37510}
37511
37512// CudnnRNNDropout sets the optional dropout attribute to value.
37513// If not specified, defaults to 0
37514func CudnnRNNDropout(value float32) CudnnRNNAttr {
37515	return func(m optionalAttr) {
37516		m["dropout"] = value
37517	}
37518}
37519
37520// CudnnRNNSeed sets the optional seed attribute to value.
37521// If not specified, defaults to 0
37522func CudnnRNNSeed(value int64) CudnnRNNAttr {
37523	return func(m optionalAttr) {
37524		m["seed"] = value
37525	}
37526}
37527
37528// CudnnRNNSeed2 sets the optional seed2 attribute to value.
37529// If not specified, defaults to 0
37530func CudnnRNNSeed2(value int64) CudnnRNNAttr {
37531	return func(m optionalAttr) {
37532		m["seed2"] = value
37533	}
37534}
37535
37536// CudnnRNNIsTraining sets the optional is_training attribute to value.
37537// If not specified, defaults to true
37538func CudnnRNNIsTraining(value bool) CudnnRNNAttr {
37539	return func(m optionalAttr) {
37540		m["is_training"] = value
37541	}
37542}
37543
37544// An RNN backed by cuDNN.
37545//
37546// Computes the RNN from the input and initial states, with respect to the params
37547// buffer.
37548//
37549// rnn_mode: Indicates the type of the RNN model.
37550// input_mode: Indicates whether there is a linear projection between the input and
37551//   the actual computation before the first layer. 'skip_input' is only allowed
37552//   when input_size == num_units; 'auto_select' implies 'skip_input' when
37553//   input_size == num_units; otherwise, it implies 'linear_input'.
37554// direction: Indicates whether a bidirectional model will be used. Should be
37555//   "unidirectional" or "bidirectional".
37556// dropout: Dropout probability. When set to 0., dropout is disabled.
37557// seed: The 1st part of a seed to initialize dropout.
37558// seed2: The 2nd part of a seed to initialize dropout.
37559// input: A 3-D tensor with the shape of [seq_length, batch_size, input_size].
37560// input_h: A 3-D tensor with the shape of [num_layer * dir, batch_size,
37561//     num_units].
37562// input_c: For LSTM, a 3-D tensor with the shape of
37563//     [num_layer * dir, batch, num_units]. For other models, it is ignored.
37564// params: A 1-D tensor that contains the weights and biases in an opaque layout.
37565//     The size must be created through CudnnRNNParamsSize, and initialized
37566//     separately. Note that they might not be compatible across different
37567//     generations. So it is a good idea to save and restore them.
37568// output: A 3-D tensor with the shape of [seq_length, batch_size,
37569//     dir * num_units].
37570// output_h: The same shape as input_h.
37571// output_c: The same shape as input_c for LSTM. An empty tensor for other models.
37572// is_training: Indicates whether this operation is used for inference or
37573//   training.
37574// reserve_space: An opaque tensor that can be used in backprop calculation. It
37575//   is only produced if is_training is true.
37576func CudnnRNN(scope *Scope, input tf.Output, input_h tf.Output, input_c tf.Output, params tf.Output, optional ...CudnnRNNAttr) (output tf.Output, output_h tf.Output, output_c tf.Output, reserve_space tf.Output) {
37577	if scope.Err() != nil {
37578		return
37579	}
37580	attrs := map[string]interface{}{}
37581	for _, a := range optional {
37582		a(attrs)
37583	}
37584	opspec := tf.OpSpec{
37585		Type: "CudnnRNN",
37586		Input: []tf.Input{
37587			input, input_h, input_c, params,
37588		},
37589		Attrs: attrs,
37590	}
37591	op := scope.AddOperation(opspec)
37592	return op.Output(0), op.Output(1), op.Output(2), op.Output(3)
37593}
37594
37595// Creates a dataset that batches `batch_size` elements from `input_dataset`.
37596//
37597// Arguments:
37598//
37599//	batch_size: A scalar representing the number of elements to accumulate in a
37600// batch.
37601//
37602//
37603func BatchDataset(scope *Scope, input_dataset tf.Output, batch_size tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
37604	if scope.Err() != nil {
37605		return
37606	}
37607	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
37608	opspec := tf.OpSpec{
37609		Type: "BatchDataset",
37610		Input: []tf.Input{
37611			input_dataset, batch_size,
37612		},
37613		Attrs: attrs,
37614	}
37615	op := scope.AddOperation(opspec)
37616	return op.Output(0)
37617}
37618
37619// Computes fingerprints of the input strings.
37620//
37621// Arguments:
37622//	input: vector of strings to compute fingerprints on.
37623//
37624// Returns a (N,2) shaped matrix where N is the number of elements in the input
37625// vector. Each row contains the low and high parts of the fingerprint.
37626func SdcaFprint(scope *Scope, input tf.Output) (output tf.Output) {
37627	if scope.Err() != nil {
37628		return
37629	}
37630	opspec := tf.OpSpec{
37631		Type: "SdcaFprint",
37632		Input: []tf.Input{
37633			input,
37634		},
37635	}
37636	op := scope.AddOperation(opspec)
37637	return op.Output(0)
37638}
37639
37640// FusedBatchNormGradV3Attr is an optional argument to FusedBatchNormGradV3.
37641type FusedBatchNormGradV3Attr func(optionalAttr)
37642
37643// FusedBatchNormGradV3Epsilon sets the optional epsilon attribute to value.
37644//
37645// value: A small float number added to the variance of x.
37646// If not specified, defaults to 0.0001
37647func FusedBatchNormGradV3Epsilon(value float32) FusedBatchNormGradV3Attr {
37648	return func(m optionalAttr) {
37649		m["epsilon"] = value
37650	}
37651}
37652
37653// FusedBatchNormGradV3DataFormat sets the optional data_format attribute to value.
37654//
37655// value: The data format for y_backprop, x, x_backprop.
37656// Either "NHWC" (default) or "NCHW".
37657// If not specified, defaults to "NHWC"
37658func FusedBatchNormGradV3DataFormat(value string) FusedBatchNormGradV3Attr {
37659	return func(m optionalAttr) {
37660		m["data_format"] = value
37661	}
37662}
37663
37664// FusedBatchNormGradV3IsTraining sets the optional is_training attribute to value.
37665//
37666// value: A bool value to indicate the operation is for training (default)
37667// or inference.
37668// If not specified, defaults to true
37669func FusedBatchNormGradV3IsTraining(value bool) FusedBatchNormGradV3Attr {
37670	return func(m optionalAttr) {
37671		m["is_training"] = value
37672	}
37673}
37674
37675// Gradient for batch normalization.
37676//
37677// Note that the size of 4D Tensors are defined by either "NHWC" or "NCHW".
37678// The size of 1D Tensors matches the dimension C of the 4D Tensors.
37679//
37680// Arguments:
37681//	y_backprop: A 4D Tensor for the gradient with respect to y.
37682//	x: A 4D Tensor for input data.
37683//	scale: A 1D Tensor for scaling factor, to scale the normalized x.
37684//	reserve_space_1: When is_training is True, a 1D Tensor for the computed batch
37685// mean to be reused in gradient computation. When is_training is
37686// False, a 1D Tensor for the population mean to be reused in both
37687// 1st and 2nd order gradient computation.
37688//	reserve_space_2: When is_training is True, a 1D Tensor for the computed batch
37689// variance (inverted variance in the cuDNN case) to be reused in
37690// gradient computation. When is_training is False, a 1D Tensor
37691// for the population variance to be reused in both 1st and 2nd
37692// order gradient computation.
37693//	reserve_space_3: When is_training is True, a 1D Tensor for some intermediate results to be reused
37694// in gradient computation. When is_training is False, a dummy empty Tensor will be
37695// created.
37696//
37697// Returns:
37698//	x_backprop: A 4D Tensor for the gradient with respect to x.
37699//	scale_backprop: A 1D Tensor for the gradient with respect to scale.
37700//	offset_backprop: A 1D Tensor for the gradient with respect to offset.
37701//	reserve_space_4: Unused placeholder to match the mean input in FusedBatchNorm.
37702//	reserve_space_5: Unused placeholder to match the variance input
37703// in FusedBatchNorm.
37704func FusedBatchNormGradV3(scope *Scope, y_backprop tf.Output, x tf.Output, scale tf.Output, reserve_space_1 tf.Output, reserve_space_2 tf.Output, reserve_space_3 tf.Output, optional ...FusedBatchNormGradV3Attr) (x_backprop tf.Output, scale_backprop tf.Output, offset_backprop tf.Output, reserve_space_4 tf.Output, reserve_space_5 tf.Output) {
37705	if scope.Err() != nil {
37706		return
37707	}
37708	attrs := map[string]interface{}{}
37709	for _, a := range optional {
37710		a(attrs)
37711	}
37712	opspec := tf.OpSpec{
37713		Type: "FusedBatchNormGradV3",
37714		Input: []tf.Input{
37715			y_backprop, x, scale, reserve_space_1, reserve_space_2, reserve_space_3,
37716		},
37717		Attrs: attrs,
37718	}
37719	op := scope.AddOperation(opspec)
37720	return op.Output(0), op.Output(1), op.Output(2), op.Output(3), op.Output(4)
37721}
37722
37723// Returns the number of records this Reader has produced.
37724//
37725// This is the same as the number of ReaderRead executions that have
37726// succeeded.
37727//
37728// Arguments:
37729//	reader_handle: Handle to a Reader.
37730func ReaderNumRecordsProducedV2(scope *Scope, reader_handle tf.Output) (records_produced tf.Output) {
37731	if scope.Err() != nil {
37732		return
37733	}
37734	opspec := tf.OpSpec{
37735		Type: "ReaderNumRecordsProducedV2",
37736		Input: []tf.Input{
37737			reader_handle,
37738		},
37739	}
37740	op := scope.AddOperation(opspec)
37741	return op.Output(0)
37742}
37743
37744// DecodeRawAttr is an optional argument to DecodeRaw.
37745type DecodeRawAttr func(optionalAttr)
37746
37747// DecodeRawLittleEndian sets the optional little_endian attribute to value.
37748//
37749// value: Whether the input `bytes` are in little-endian order.
37750// Ignored for `out_type` values that are stored in a single byte like
37751// `uint8`.
37752// If not specified, defaults to true
37753func DecodeRawLittleEndian(value bool) DecodeRawAttr {
37754	return func(m optionalAttr) {
37755		m["little_endian"] = value
37756	}
37757}
37758
37759// Reinterpret the bytes of a string as a vector of numbers.
37760//
37761// Arguments:
37762//	bytes: All the elements must have the same length.
37763//
37764//
37765// Returns A Tensor with one more dimension than the input `bytes`.  The
37766// added dimension will have size equal to the length of the elements
37767// of `bytes` divided by the number of bytes to represent `out_type`.
37768func DecodeRaw(scope *Scope, bytes tf.Output, out_type tf.DataType, optional ...DecodeRawAttr) (output tf.Output) {
37769	if scope.Err() != nil {
37770		return
37771	}
37772	attrs := map[string]interface{}{"out_type": out_type}
37773	for _, a := range optional {
37774		a(attrs)
37775	}
37776	opspec := tf.OpSpec{
37777		Type: "DecodeRaw",
37778		Input: []tf.Input{
37779			bytes,
37780		},
37781		Attrs: attrs,
37782	}
37783	op := scope.AddOperation(opspec)
37784	return op.Output(0)
37785}
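
// Example: decoding an 8-byte string into two little-endian int32 values (a
// hand-written sketch; boilerplate as in the HistogramFixedWidth example
// above):
//
//	s := op.NewScope()
//	raw := op.Const(s, []string{"\x01\x00\x00\x00\x02\x00\x00\x00"})
//	ints := op.DecodeRaw(s, raw, tf.Int32, op.DecodeRawLittleEndian(true))
//	// ints evaluates to [[1 2]]: one row per input element, 8 bytes / 4 = 2 values.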
37786
37787// AvgPool3DAttr is an optional argument to AvgPool3D.
37788type AvgPool3DAttr func(optionalAttr)
37789
37790// AvgPool3DDataFormat sets the optional data_format attribute to value.
37791//
37792// value: The data format of the input and output data. With the
37793// default format "NDHWC", the data is stored in the order of:
37794//     [batch, in_depth, in_height, in_width, in_channels].
37795// Alternatively, the format could be "NCDHW", the data storage order is:
37796//     [batch, in_channels, in_depth, in_height, in_width].
37797// If not specified, defaults to "NDHWC"
37798func AvgPool3DDataFormat(value string) AvgPool3DAttr {
37799	return func(m optionalAttr) {
37800		m["data_format"] = value
37801	}
37802}
37803
37804// Performs 3D average pooling on the input.
37805//
37806// Each entry in `output` is the mean of the corresponding size `ksize` window in
37807// `value`.
37808//
37809// Arguments:
37810//	input: Shape `[batch, depth, rows, cols, channels]` tensor to pool over.
37811//	ksize: 1-D tensor of length 5. The size of the window for each dimension of
37812// the input tensor. Must have `ksize[0] = ksize[4] = 1`.
37813//	strides: 1-D tensor of length 5. The stride of the sliding window for each
37814// dimension of `input`. Must have `strides[0] = strides[4] = 1`.
37815//	padding: The type of padding algorithm to use.
37816//
37817// Returns The average pooled output tensor.
37818func AvgPool3D(scope *Scope, input tf.Output, ksize []int64, strides []int64, padding string, optional ...AvgPool3DAttr) (output tf.Output) {
37819	if scope.Err() != nil {
37820		return
37821	}
37822	attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
37823	for _, a := range optional {
37824		a(attrs)
37825	}
37826	opspec := tf.OpSpec{
37827		Type: "AvgPool3D",
37828		Input: []tf.Input{
37829			input,
37830		},
37831		Attrs: attrs,
37832	}
37833	op := scope.AddOperation(opspec)
37834	return op.Output(0)
37835}
37836
37837// QueueDequeueUpToV2Attr is an optional argument to QueueDequeueUpToV2.
37838type QueueDequeueUpToV2Attr func(optionalAttr)
37839
37840// QueueDequeueUpToV2TimeoutMs sets the optional timeout_ms attribute to value.
37841//
37842// value: If the queue has fewer than n elements, this operation
37843// will block for up to timeout_ms milliseconds.
37844// Note: This option is not supported yet.
37845// If not specified, defaults to -1
37846func QueueDequeueUpToV2TimeoutMs(value int64) QueueDequeueUpToV2Attr {
37847	return func(m optionalAttr) {
37848		m["timeout_ms"] = value
37849	}
37850}
37851
37852// Dequeues `n` tuples of one or more tensors from the given queue.
37853//
37854// This operation is not supported by all queues.  If a queue does not support
37855// DequeueUpTo, then an Unimplemented error is returned.
37856//
37857// If the queue is closed and there are more than 0 but fewer than `n`
37858// elements remaining, then instead of returning an OutOfRange error like
37859// QueueDequeueMany, fewer than `n` elements are returned immediately.  If
37860// the queue is closed and there are 0 elements left in the queue, then
37861// an OutOfRange error is returned just like in QueueDequeueMany.
37862// Otherwise the behavior is identical to QueueDequeueMany:
37863//
37864// This operation concatenates queue-element component tensors along the
37865// 0th dimension to make a single component tensor.  All of the components
37866// in the dequeued tuple will have size n in the 0th dimension.
37867//
37868// This operation has `k` outputs, where `k` is the number of components in
37869// the tuples stored in the given queue, and output `i` is the ith
37870// component of the dequeued tuple.
37871//
37872// Arguments:
37873//	handle: The handle to a queue.
37874//	n: The number of tuples to dequeue.
37875//	component_types: The type of each component in a tuple.
37876//
37877// Returns One or more tensors that were dequeued as a tuple.
37878func QueueDequeueUpToV2(scope *Scope, handle tf.Output, n tf.Output, component_types []tf.DataType, optional ...QueueDequeueUpToV2Attr) (components []tf.Output) {
37879	if scope.Err() != nil {
37880		return
37881	}
37882	attrs := map[string]interface{}{"component_types": component_types}
37883	for _, a := range optional {
37884		a(attrs)
37885	}
37886	opspec := tf.OpSpec{
37887		Type: "QueueDequeueUpToV2",
37888		Input: []tf.Input{
37889			handle, n,
37890		},
37891		Attrs: attrs,
37892	}
37893	op := scope.AddOperation(opspec)
37894	if scope.Err() != nil {
37895		return
37896	}
37897	var idx int
37898	var err error
37899	if components, idx, err = makeOutputList(op, idx, "components"); err != nil {
37900		scope.UpdateErr("QueueDequeueUpToV2", err)
37901		return
37902	}
37903	return components
37904}
37905
37906// Converts a SparseTensor to a (possibly batched) CSRSparseMatrix.
37907//
37908// Arguments:
37909//	indices: SparseTensor indices.
37910//	values: SparseTensor values.
37911//	dense_shape: SparseTensor dense shape.
37912//
37913// Returns A (possibly batched) CSRSparseMatrix.
37914func SparseTensorToCSRSparseMatrix(scope *Scope, indices tf.Output, values tf.Output, dense_shape tf.Output) (sparse_matrix tf.Output) {
37915	if scope.Err() != nil {
37916		return
37917	}
37918	opspec := tf.OpSpec{
37919		Type: "SparseTensorToCSRSparseMatrix",
37920		Input: []tf.Input{
37921			indices, values, dense_shape,
37922		},
37923	}
37924	op := scope.AddOperation(opspec)
37925	return op.Output(0)
37926}
37927
37928// Computes the product along segments of a tensor.
37929//
37930// Read
37931// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)
37932// for an explanation of segments.
37933//
37934// This operator is similar to the unsorted segment sum operator,
37935// `tf.unsorted_segment_sum`.
37936// Instead of computing the sum over segments, it computes the product of all
37937// entries belonging to a segment such that:
37938//
37939// \\(output_i = \prod_{j...} data[j...]\\) where the product is over tuples
37940// `j...` such that `segment_ids[j...] == i`.
37941//
37942// For example:
37943//
37944// ``` python
37945// c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]])
37946// tf.unsorted_segment_prod(c, tf.constant([0, 1, 0]), num_segments=2)
37947// # ==> [[ 4,  6, 6, 4],
37948// #       [5,  6, 7, 8]]
37949// ```
37950//
37951// If there is no entry for a given segment ID `i`, it outputs 1.
37952//
37953// If the given segment ID `i` is negative, then the corresponding value is
37954// dropped, and will not be included in the result.
37955//
37956// Arguments:
37957//
37958//	segment_ids: A tensor whose shape is a prefix of `data.shape`.
37959//
37960//
37961// Returns Has same shape as data, except for the first `segment_ids.rank`
37962// dimensions, which are replaced with a single dimension which has size
37963// `num_segments`.
37964func UnsortedSegmentProd(scope *Scope, data tf.Output, segment_ids tf.Output, num_segments tf.Output) (output tf.Output) {
37965	if scope.Err() != nil {
37966		return
37967	}
37968	opspec := tf.OpSpec{
37969		Type: "UnsortedSegmentProd",
37970		Input: []tf.Input{
37971			data, segment_ids, num_segments,
37972		},
37973	}
37974	op := scope.AddOperation(opspec)
37975	return op.Output(0)
37976}
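
// Example: the segment product from the Python snippet above, in Go (a
// hand-written sketch; boilerplate as in the HistogramFixedWidth example
// above):
//
//	s := op.NewScope()
//	data := op.Const(s, [][]int32{{1, 2, 3, 4}, {5, 6, 7, 8}, {4, 3, 2, 1}})
//	segIDs := op.Const(s, []int32{0, 1, 0})
//	num := op.Const(s, int32(2))
//	prod := op.UnsortedSegmentProd(s, data, segIDs, num)
//	// prod evaluates to [[4 6 6 4] [5 6 7 8]].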
37977
37978// AngleAttr is an optional argument to Angle.
37979type AngleAttr func(optionalAttr)
37980
37981// AngleTout sets the optional Tout attribute to value.
37982// If not specified, defaults to DT_FLOAT
37983func AngleTout(value tf.DataType) AngleAttr {
37984	return func(m optionalAttr) {
37985		m["Tout"] = value
37986	}
37987}
37988
37989// Returns the argument of a complex number.
37990//
37991// Given a tensor `input` of complex numbers, this operation returns a tensor of
37992// type `float` that is the argument of each element in `input`. All elements in
37993// `input` must be complex numbers of the form \\(a + bj\\), where *a*
37994// is the real part and *b* is the imaginary part.
37995//
37996// The argument returned by this operation is of the form \\(atan2(b, a)\\).
37997//
37998// For example:
37999//
38000// ```
38001// # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
38002// tf.angle(input) ==> [2.0132, 1.056]
38003// ```
38004//
38005// @compatibility(numpy)
38006// Equivalent to np.angle.
38007// @end_compatibility
38008func Angle(scope *Scope, input tf.Output, optional ...AngleAttr) (output tf.Output) {
38009	if scope.Err() != nil {
38010		return
38011	}
38012	attrs := map[string]interface{}{}
38013	for _, a := range optional {
38014		a(attrs)
38015	}
38016	opspec := tf.OpSpec{
38017		Type: "Angle",
38018		Input: []tf.Input{
38019			input,
38020		},
38021		Attrs: attrs,
38022	}
38023	op := scope.AddOperation(opspec)
38024	return op.Output(0)
38025}
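
// Example: the complex-argument computation from the snippet above, in Go (a
// hand-written sketch; boilerplate as in the HistogramFixedWidth example
// above; assumes op.Const accepts a []complex64 value, which the Go tensor
// package supports):
//
//	s := op.NewScope()
//	in := op.Const(s, []complex64{complex(-2.25, 4.75), complex(3.25, 5.75)})
//	ang := op.Angle(s, in) // Tout defaults to DT_FLOAT
//	// ang evaluates to roughly [2.0132 1.0565].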
38026
38027// Computes natural logarithm of x element-wise.
38028//
38029// I.e., \\(y = \log_e x\\).
38030//
38031// Example:
38032//
38033// ```python
38034// x = tf.constant([0, 0.5, 1, 5])
38035// tf.math.log(x) ==> [-inf, -0.6931472,  0. ,  1.609438]
38036// ```
38037func Log(scope *Scope, x tf.Output) (y tf.Output) {
38038	if scope.Err() != nil {
38039		return
38040	}
38041	opspec := tf.OpSpec{
38042		Type: "Log",
38043		Input: []tf.Input{
38044			x,
38045		},
38046	}
38047	op := scope.AddOperation(opspec)
38048	return op.Output(0)
38049}
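
// Example: element-wise natural log in Go (a hand-written sketch; boilerplate
// as in the HistogramFixedWidth example above):
//
//	s := op.NewScope()
//	x := op.Const(s, []float32{0, 0.5, 1, 5})
//	y := op.Log(s, x)
//	// y evaluates to [-Inf -0.6931472 0 1.609438].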
38050
38051// IRFFT2DAttr is an optional argument to IRFFT2D.
38052type IRFFT2DAttr func(optionalAttr)
38053
38054// IRFFT2DTreal sets the optional Treal attribute to value.
38055// If not specified, defaults to DT_FLOAT
38056func IRFFT2DTreal(value tf.DataType) IRFFT2DAttr {
38057	return func(m optionalAttr) {
38058		m["Treal"] = value
38059	}
38060}
38061
38062// Inverse 2D real-valued fast Fourier transform.
38063//
38064// Computes the inverse 2-dimensional discrete Fourier transform of a real-valued
38065// signal over the inner-most 2 dimensions of `input`.
38066//
38067// The inner-most 2 dimensions of `input` are assumed to be the result of `RFFT2D`:
38068// The inner-most dimension contains the `fft_length / 2 + 1` unique components of
38069// the DFT of a real-valued signal. If `fft_length` is not provided, it is computed
38070// from the size of the inner-most 2 dimensions of `input`. If the FFT length used
38071// to compute `input` is odd, it should be provided since it cannot be inferred
38072// properly.
38073//
38074// Along each axis `IRFFT2D` is computed on, if `fft_length` (or
38075// `fft_length / 2 + 1` for the inner-most dimension) is smaller than the
38076// corresponding dimension of `input`, the dimension is cropped. If it is larger,
38077// the dimension is padded with zeros.
38078//
38079// Arguments:
38080//	input: A complex tensor.
38081//	fft_length: An int32 tensor of shape [2]. The FFT length for each dimension.
38082//
38083// Returns A float32 tensor of the same rank as `input`. The inner-most 2
38084//   dimensions of `input` are replaced with the `fft_length` samples of their
38085//   inverse 2D Fourier transform.
38086//
38087// @compatibility(numpy)
38088// Equivalent to np.fft.irfft2
38089// @end_compatibility
38090func IRFFT2D(scope *Scope, input tf.Output, fft_length tf.Output, optional ...IRFFT2DAttr) (output tf.Output) {
38091	if scope.Err() != nil {
38092		return
38093	}
38094	attrs := map[string]interface{}{}
38095	for _, a := range optional {
38096		a(attrs)
38097	}
38098	opspec := tf.OpSpec{
38099		Type: "IRFFT2D",
38100		Input: []tf.Input{
38101			input, fft_length,
38102		},
38103		Attrs: attrs,
38104	}
38105	op := scope.AddOperation(opspec)
38106	return op.Output(0)
38107}
38108
38109// ResizeBicubicAttr is an optional argument to ResizeBicubic.
38110type ResizeBicubicAttr func(optionalAttr)
38111
38112// ResizeBicubicAlignCorners sets the optional align_corners attribute to value.
38113//
38114// value: If true, the centers of the 4 corner pixels of the input and output tensors are
38115// aligned, preserving the values at the corner pixels. Defaults to false.
38116// If not specified, defaults to false
38117func ResizeBicubicAlignCorners(value bool) ResizeBicubicAttr {
38118	return func(m optionalAttr) {
38119		m["align_corners"] = value
38120	}
38121}
38122
38123// ResizeBicubicHalfPixelCenters sets the optional half_pixel_centers attribute to value.
38124// If not specified, defaults to false
38125func ResizeBicubicHalfPixelCenters(value bool) ResizeBicubicAttr {
38126	return func(m optionalAttr) {
38127		m["half_pixel_centers"] = value
38128	}
38129}
38130
38131// Resize `images` to `size` using bicubic interpolation.
38132//
38133// Input images can be of different types but output images are always float.
38134//
38135// Arguments:
38136//	images: 4-D with shape `[batch, height, width, channels]`.
//	size: A 1-D int32 Tensor of 2 elements: `new_height, new_width`.  The
// new size for the images.
//
// Returns 4-D with shape
// `[batch, new_height, new_width, channels]`.
func ResizeBicubic(scope *Scope, images tf.Output, size tf.Output, optional ...ResizeBicubicAttr) (resized_images tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ResizeBicubic",
		Input: []tf.Input{
			images, size,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
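
// A sketch of the functional-option pattern these wrappers share: optional
// attributes are passed as trailing ...Attr values. It assumes the NewScope,
// Placeholder and Const helpers from this package and tf.MakeShape from the
// parent package:
//
// ```go
// s := op.NewScope()
// images := op.Placeholder(s, tf.Float,
// 	op.PlaceholderShape(tf.MakeShape(1, 480, 640, 3)))
// size := op.Const(s, []int32{224, 224})
// resized := op.ResizeBicubic(s, images, size,
// 	op.ResizeBicubicAlignCorners(false),
// 	op.ResizeBicubicHalfPixelCenters(true))
// ```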

// Computes the mean along sparse segments of a tensor.
//
// Like `SparseSegmentMean`, but allows missing ids in `segment_ids`. If an id is
// missing, the `output` tensor at that position will be zeroed.
//
// Read
// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)
// for an explanation of segments.
//
// Arguments:
//
//	indices: A 1-D tensor. Has same rank as `segment_ids`.
//	segment_ids: A 1-D tensor. Values should be sorted and can be repeated.
//	num_segments: Should equal the number of distinct segment IDs.
//
// Returns Has same shape as data, except for dimension 0 which has size
// `num_segments`.
func SparseSegmentMeanWithNumSegments(scope *Scope, data tf.Output, indices tf.Output, segment_ids tf.Output, num_segments tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "SparseSegmentMeanWithNumSegments",
		Input: []tf.Input{
			data, indices, segment_ids, num_segments,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// RFFT2DAttr is an optional argument to RFFT2D.
type RFFT2DAttr func(optionalAttr)

// RFFT2DTcomplex sets the optional Tcomplex attribute to value.
// If not specified, defaults to DT_COMPLEX64
func RFFT2DTcomplex(value tf.DataType) RFFT2DAttr {
	return func(m optionalAttr) {
		m["Tcomplex"] = value
	}
}

// 2D real-valued fast Fourier transform.
//
// Computes the 2-dimensional discrete Fourier transform of a real-valued signal
// over the inner-most 2 dimensions of `input`.
//
// Since the DFT of a real signal is Hermitian-symmetric, `RFFT2D` only returns the
// `fft_length / 2 + 1` unique components of the FFT for the inner-most dimension
// of `output`: the zero-frequency term, followed by the `fft_length / 2`
// positive-frequency terms.
//
// Along each axis `RFFT2D` is computed on, if `fft_length` is smaller than the
// corresponding dimension of `input`, the dimension is cropped. If it is larger,
// the dimension is padded with zeros.
//
// Arguments:
//	input: A float32 tensor.
//	fft_length: An int32 tensor of shape [2]. The FFT length for each dimension.
//
// Returns A complex64 tensor of the same rank as `input`. The inner-most 2
//   dimensions of `input` are replaced with their 2D Fourier transform. The
//   inner-most dimension contains `fft_length / 2 + 1` unique frequency
//   components.
//
// @compatibility(numpy)
// Equivalent to np.fft.rfft2
// @end_compatibility
func RFFT2D(scope *Scope, input tf.Output, fft_length tf.Output, optional ...RFFT2DAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "RFFT2D",
		Input: []tf.Input{
			input, fft_length,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// RFFTAttr is an optional argument to RFFT.
type RFFTAttr func(optionalAttr)

// RFFTTcomplex sets the optional Tcomplex attribute to value.
// If not specified, defaults to DT_COMPLEX64
func RFFTTcomplex(value tf.DataType) RFFTAttr {
	return func(m optionalAttr) {
		m["Tcomplex"] = value
	}
}

// Real-valued fast Fourier transform.
//
// Computes the 1-dimensional discrete Fourier transform of a real-valued signal
// over the inner-most dimension of `input`.
//
// Since the DFT of a real signal is Hermitian-symmetric, `RFFT` only returns the
// `fft_length / 2 + 1` unique components of the FFT: the zero-frequency term,
// followed by the `fft_length / 2` positive-frequency terms.
//
// Along the axis `RFFT` is computed on, if `fft_length` is smaller than the
// corresponding dimension of `input`, the dimension is cropped. If it is larger,
// the dimension is padded with zeros.
//
// Arguments:
//	input: A float32 tensor.
//	fft_length: An int32 tensor of shape [1]. The FFT length.
//
// Returns A complex64 tensor of the same rank as `input`. The inner-most
//   dimension of `input` is replaced with the `fft_length / 2 + 1` unique
//   frequency components of its 1D Fourier transform.
//
// @compatibility(numpy)
// Equivalent to np.fft.rfft
// @end_compatibility
func RFFT(scope *Scope, input tf.Output, fft_length tf.Output, optional ...RFFTAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "RFFT",
		Input: []tf.Input{
			input, fft_length,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
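
// A sketch of a 1D real FFT over a float32 signal, assuming the NewScope and
// Const helpers from this package. fft_length is a rank-1 int32 tensor with
// one element; the output is complex64 with fft_length/2 + 1 == 3 components:
//
// ```go
// s := op.NewScope()
// signal := op.Const(s, []float32{0, 1, 0, -1})
// fftLength := op.Const(s, []int32{4})
// spectrum := op.RFFT(s, signal, fftLength)
// ```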

// 3D fast Fourier transform.
//
// Computes the 3-dimensional discrete Fourier transform over the inner-most 3
// dimensions of `input`.
//
// Arguments:
//	input: A complex tensor.
//
// Returns A complex tensor of the same shape as `input`. The inner-most 3
//   dimensions of `input` are replaced with their 3D Fourier transform.
//
// @compatibility(numpy)
// Equivalent to np.fft.fftn with 3 dimensions.
// @end_compatibility
func FFT3D(scope *Scope, input tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "FFT3D",
		Input: []tf.Input{
			input,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Creates a dataset that passes a sliding window over `input_dataset`.
//
// Arguments:
//
//	window_size: A scalar representing the number of elements in the
// sliding window.
//	window_shift: A scalar representing the steps moving the sliding window
// forward in one iteration. It must be positive.
//	window_stride: A scalar representing the stride of the input elements of the sliding window.
// It must be positive.
//
//
func SlidingWindowDataset(scope *Scope, input_dataset tf.Output, window_size tf.Output, window_shift tf.Output, window_stride tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
	opspec := tf.OpSpec{
		Type: "SlidingWindowDataset",
		Input: []tf.Input{
			input_dataset, window_size, window_shift, window_stride,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Locks a mutex resource.  The output is the lock.  So long as the lock tensor
//
// is alive, any other request to use `MutexLock` with this mutex will wait.
//
// This is particularly useful for creating a critical section when used in
// conjunction with `MutexLockIdentity`:
//
// ```python
//
// mutex = mutex_v2(
//   shared_name=handle_name, container=container, name=name)
//
// def execute_in_critical_section(fn, *args, **kwargs):
//   lock = gen_resource_variable_ops.mutex_lock(mutex)
//
//   with ops.control_dependencies([lock]):
//     r = fn(*args, **kwargs)
//
//   with ops.control_dependencies(nest.flatten(r)):
//     with ops.colocate_with(mutex):
//       ensure_lock_exists = mutex_lock_identity(lock)
//
//     # Make sure that if any element of r is accessed, all of
//     # them are executed together.
//     r = nest.map_structure(tf.identity, r)
//
//   with ops.control_dependencies([ensure_lock_exists]):
//     return nest.map_structure(tf.identity, r)
// ```
//
// While `fn` is running in the critical section, no other functions which wish to
// use this critical section may run.
//
// Often the use case is that two executions of the same graph, in parallel,
// wish to run `fn`; and we wish to ensure that only one of them executes
// at a time.  This is especially important if `fn` modifies one or more
// variables at a time.
//
// It is also useful if two separate functions must share a resource, but we
// wish to ensure the usage is exclusive.
//
// Arguments:
//	mutex: The mutex resource to lock.
//
// Returns A tensor that keeps a shared pointer to a lock on the mutex;
// when the Tensor is destroyed, the use count on the shared pointer is decreased
// by 1.  When it reaches 0, the lock is released.
func MutexLock(scope *Scope, mutex tf.Output) (mutex_lock tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "MutexLock",
		Input: []tf.Input{
			mutex,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
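
// A usage sketch from Go, assuming a MutexV2 wrapper (with a MutexV2SharedName
// option) is also generated in this package; ops that must run under the lock
// can take a control dependency on `lock`:
//
// ```go
// s := op.NewScope()
// mutex := op.MutexV2(s, op.MutexV2SharedName("my_critical_section"))
// lock := op.MutexLock(s, mutex)
// ```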

// MaxPoolGradWithArgmaxAttr is an optional argument to MaxPoolGradWithArgmax.
type MaxPoolGradWithArgmaxAttr func(optionalAttr)

// MaxPoolGradWithArgmaxIncludeBatchInIndex sets the optional include_batch_in_index attribute to value.
//
// value: Whether to include batch dimension in flattened index of `argmax`.
// If not specified, defaults to false
func MaxPoolGradWithArgmaxIncludeBatchInIndex(value bool) MaxPoolGradWithArgmaxAttr {
	return func(m optionalAttr) {
		m["include_batch_in_index"] = value
	}
}

// Computes gradients of the maxpooling function.
//
// Arguments:
//	input: The original input.
//	grad: 4-D with shape `[batch, height, width, channels]`.  Gradients w.r.t. the
// output of `max_pool`.
//	argmax: The indices of the maximum values chosen for each output of `max_pool`.
//	ksize: The size of the window for each dimension of the input tensor.
//	strides: The stride of the sliding window for each dimension of the
// input tensor.
//	padding: The type of padding algorithm to use.
//
// Returns Gradients w.r.t. the input of `max_pool`.
func MaxPoolGradWithArgmax(scope *Scope, input tf.Output, grad tf.Output, argmax tf.Output, ksize []int64, strides []int64, padding string, optional ...MaxPoolGradWithArgmaxAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "MaxPoolGradWithArgmax",
		Input: []tf.Input{
			input, grad, argmax,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// 2D fast Fourier transform.
//
// Computes the 2-dimensional discrete Fourier transform over the inner-most
// 2 dimensions of `input`.
//
// Arguments:
//	input: A complex tensor.
//
// Returns A complex tensor of the same shape as `input`. The inner-most 2
//   dimensions of `input` are replaced with their 2D Fourier transform.
//
// @compatibility(numpy)
// Equivalent to np.fft.fft2
// @end_compatibility
func FFT2D(scope *Scope, input tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "FFT2D",
		Input: []tf.Input{
			input,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// SdcaOptimizerAttr is an optional argument to SdcaOptimizer.
type SdcaOptimizerAttr func(optionalAttr)

// SdcaOptimizerAdaptative sets the optional adaptative attribute to value.
//
// value: Whether to use Adaptive SDCA for the inner loop.
// If not specified, defaults to true
func SdcaOptimizerAdaptative(value bool) SdcaOptimizerAttr {
	return func(m optionalAttr) {
		m["adaptative"] = value
	}
}

// Distributed version of Stochastic Dual Coordinate Ascent (SDCA) optimizer for
//
// linear models with L1 + L2 regularization. As the global optimization objective
// is strongly convex, the optimizer optimizes the dual objective at each step. The
// optimizer applies each update one example at a time. Examples are sampled
// uniformly, and the optimizer is learning-rate free and enjoys a linear
// convergence rate.
//
// [Proximal Stochastic Dual Coordinate Ascent](http://arxiv.org/pdf/1211.2717v1.pdf).<br>
// Shai Shalev-Shwartz, Tong Zhang. 2012
//
// $$Loss Objective = \sum f_{i}(wx_{i}) + (l2 / 2) * |w|^2 + l1 * |w|$$
//
// [Adding vs. Averaging in Distributed Primal-Dual Optimization](http://arxiv.org/abs/1502.03508).<br>
// Chenxin Ma, Virginia Smith, Martin Jaggi, Michael I. Jordan,
// Peter Richtarik, Martin Takac. 2015
//
// [Stochastic Dual Coordinate Ascent with Adaptive Probabilities](https://arxiv.org/abs/1502.08053).<br>
// Dominik Csiba, Zheng Qu, Peter Richtarik. 2015
//
// Arguments:
//	sparse_example_indices: a list of vectors which contain example indices.
//	sparse_feature_indices: a list of vectors which contain feature indices.
//	sparse_feature_values: a list of vectors which contain the feature values
// associated with each feature group.
//	dense_features: a list of matrices which contain the dense feature values.
//	example_weights: a vector which contains the weight associated with each
// example.
//	example_labels: a vector which contains the label/target associated with each
// example.
//	sparse_indices: a list of vectors where each value is the indices which have
// corresponding weights in sparse_weights. This field may be omitted for the
// dense approach.
//	sparse_weights: a list of vectors where each value is the weight associated with
// a sparse feature group.
//	dense_weights: a list of vectors where the values are the weights associated
// with a dense feature group.
//	example_state_data: a list of vectors containing the example state data.
//	loss_type: Type of the primal loss. Currently SdcaSolver supports logistic,
// squared and hinge losses.
//	l1: Symmetric l1 regularization strength.
//	l2: Symmetric l2 regularization strength.
//	num_loss_partitions: Number of partitions of the global loss function.
//	num_inner_iterations: Number of iterations per mini-batch.
//
// Returns:
//	out_example_state_data: a list of vectors containing the updated example state
// data.
//	out_delta_sparse_weights: a list of vectors where each value is the delta
// weights associated with a sparse feature group.
//	out_delta_dense_weights: a list of vectors where the values are the delta
// weights associated with a dense feature group.
func SdcaOptimizer(scope *Scope, sparse_example_indices []tf.Output, sparse_feature_indices []tf.Output, sparse_feature_values []tf.Output, dense_features []tf.Output, example_weights tf.Output, example_labels tf.Output, sparse_indices []tf.Output, sparse_weights []tf.Output, dense_weights []tf.Output, example_state_data tf.Output, loss_type string, l1 float32, l2 float32, num_loss_partitions int64, num_inner_iterations int64, optional ...SdcaOptimizerAttr) (out_example_state_data tf.Output, out_delta_sparse_weights []tf.Output, out_delta_dense_weights []tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"loss_type": loss_type, "l1": l1, "l2": l2, "num_loss_partitions": num_loss_partitions, "num_inner_iterations": num_inner_iterations}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "SdcaOptimizer",
		Input: []tf.Input{
			tf.OutputList(sparse_example_indices), tf.OutputList(sparse_feature_indices), tf.OutputList(sparse_feature_values), tf.OutputList(dense_features), example_weights, example_labels, tf.OutputList(sparse_indices), tf.OutputList(sparse_weights), tf.OutputList(dense_weights), example_state_data,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	if scope.Err() != nil {
		return
	}
	var idx int
	var err error
	out_example_state_data = op.Output(idx)
	if out_delta_sparse_weights, idx, err = makeOutputList(op, idx, "out_delta_sparse_weights"); err != nil {
		scope.UpdateErr("SdcaOptimizer", err)
		return
	}
	if out_delta_dense_weights, idx, err = makeOutputList(op, idx, "out_delta_dense_weights"); err != nil {
		scope.UpdateErr("SdcaOptimizer", err)
		return
	}
	return out_example_state_data, out_delta_sparse_weights, out_delta_dense_weights
}

// Inverse fast Fourier transform.
//
// Computes the inverse 1-dimensional discrete Fourier transform over the
// inner-most dimension of `input`.
//
// Arguments:
//	input: A complex tensor.
//
// Returns A complex tensor of the same shape as `input`. The inner-most
//   dimension of `input` is replaced with its inverse 1D Fourier transform.
//
// @compatibility(numpy)
// Equivalent to np.fft.ifft
// @end_compatibility
func IFFT(scope *Scope, input tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "IFFT",
		Input: []tf.Input{
			input,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// CollectiveGatherAttr is an optional argument to CollectiveGather.
type CollectiveGatherAttr func(optionalAttr)

// CollectiveGatherCommunicationHint sets the optional communication_hint attribute to value.
// If not specified, defaults to "auto"
func CollectiveGatherCommunicationHint(value string) CollectiveGatherAttr {
	return func(m optionalAttr) {
		m["communication_hint"] = value
	}
}

// CollectiveGatherTimeoutSeconds sets the optional timeout_seconds attribute to value.
// If not specified, defaults to 0
func CollectiveGatherTimeoutSeconds(value float32) CollectiveGatherAttr {
	return func(m optionalAttr) {
		m["timeout_seconds"] = value
	}
}

// Mutually accumulates multiple tensors of identical type and shape.
func CollectiveGather(scope *Scope, input tf.Output, group_size int64, group_key int64, instance_key int64, shape tf.Shape, optional ...CollectiveGatherAttr) (data tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"group_size": group_size, "group_key": group_key, "instance_key": instance_key, "shape": shape}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "CollectiveGather",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// L2 Loss.
//
// Computes half the L2 norm of a tensor without the `sqrt`:
//
//     output = sum(t ** 2) / 2
//
// Arguments:
//	t: Typically 2-D, but may have any dimensions.
//
// Returns 0-D.
func L2Loss(scope *Scope, t tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "L2Loss",
		Input: []tf.Input{
			t,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
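
// A worked example: for t = [1, 2, 3] the result is
// (1*1 + 2*2 + 3*3) / 2 = 7. As a sketch, assuming the NewScope and Const
// helpers from this package:
//
// ```go
// s := op.NewScope()
// t := op.Const(s, []float32{1, 2, 3})
// loss := op.L2Loss(s, t) // scalar, evaluates to 7.0
// ```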

// An op that receives embedding activations on the TPU.
//
// The TPU system performs the embedding lookups and aggregations specified by
// the arguments to TPUEmbeddingEnqueue(Integer/Sparse/SparseTensor)Batch. The
// results of these aggregations are visible to the Tensorflow Graph as the
// outputs of a RecvTPUEmbeddingActivations op. This op returns a list containing
// one Tensor of activations per table specified in the model. There can be at
// most one RecvTPUEmbeddingActivations op in the TPU graph.
//
// Arguments:
//	num_outputs: The number of output activation tensors, equal to the number of
// embedding tables in the model.
//	config: Serialized TPUEmbeddingConfiguration proto.
//
// Returns A TensorList of embedding activations containing one Tensor per
// embedding table in the model.
func RecvTPUEmbeddingActivations(scope *Scope, num_outputs int64, config string) (outputs []tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_outputs": num_outputs, "config": config}
	opspec := tf.OpSpec{
		Type: "RecvTPUEmbeddingActivations",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	if scope.Err() != nil {
		return
	}
	var idx int
	var err error
	if outputs, idx, err = makeOutputList(op, idx, "outputs"); err != nil {
		scope.UpdateErr("RecvTPUEmbeddingActivations", err)
		return
	}
	return outputs
}

// Reads out the CSR components at batch `index`.
//
// This op is meant only for debugging / testing, and its interface is not expected
// to be stable.
//
// Arguments:
//	csr_sparse_matrix: A batched CSRSparseMatrix.
//	index: The index in `csr_sparse_matrix`'s batch.
//
//
// Returns:
//	row_ptrs: An array containing CSR matrix row pointers.
//	col_inds: An array containing CSR matrix column indices.
//	values: An array containing CSR matrix nonzero values.
func CSRSparseMatrixComponents(scope *Scope, csr_sparse_matrix tf.Output, index tf.Output, type_ tf.DataType) (row_ptrs tf.Output, col_inds tf.Output, values tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"type": type_}
	opspec := tf.OpSpec{
		Type: "CSRSparseMatrixComponents",
		Input: []tf.Input{
			csr_sparse_matrix, index,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}

// InfeedEnqueuePrelinearizedBufferAttr is an optional argument to InfeedEnqueuePrelinearizedBuffer.
type InfeedEnqueuePrelinearizedBufferAttr func(optionalAttr)

// InfeedEnqueuePrelinearizedBufferDeviceOrdinal sets the optional device_ordinal attribute to value.
//
// value: The TPU device to use. This should be -1 when the Op is running on a TPU device
// and >= 0 when the Op is running on the CPU device.
// If not specified, defaults to -1
func InfeedEnqueuePrelinearizedBufferDeviceOrdinal(value int64) InfeedEnqueuePrelinearizedBufferAttr {
	return func(m optionalAttr) {
		m["device_ordinal"] = value
	}
}

// An op which enqueues prelinearized buffer into TPU infeed.
//
// Arguments:
//	input: A variant tensor representing linearized output.
//
// Returns the created operation.
func InfeedEnqueuePrelinearizedBuffer(scope *Scope, input tf.Output, optional ...InfeedEnqueuePrelinearizedBufferAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "InfeedEnqueuePrelinearizedBuffer",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}

// Create a dense tensor from a ragged tensor, possibly altering its shape.
//
// The `ragged_to_dense` op creates a dense tensor from a list of row partition
// tensors, a value vector, and default values. If the shape is unspecified, the
// minimal shape required to contain all the elements in the ragged tensor (the
// natural shape) will be used. If some dimensions are left unspecified, then the
// size of the natural shape is used in that dimension.
//
// The default_value will be broadcast to the output shape. After that, the values
// from the ragged tensor overwrite the default values. Note that the default_value
// must have fewer dimensions than the values tensor.
//
// The row partition tensors are in the order of the dimensions.
// At present, the types can be:
// * "ROW_SPLITS": the row_splits tensor from the ragged tensor.
// * "VALUE_ROWIDS": the value_rowids tensor from the ragged tensor.
// * "FIRST_DIM_SIZE": if value_rowids is used for the first dimension, then it
//   is preceded by "FIRST_DIM_SIZE".
//
// Arguments:
//	shape: The desired shape of the output tensor. If left unspecified (empty),
// the minimal shape required to contain all the elements in the ragged tensor
// (the natural shape) will be used. If some dimensions are left unspecified, then
// the size of the natural shape is used in that dimension.
//
// Note that dense dimensions cannot be modified by the shape argument. Trying to
// change the size of a dense dimension will cause the op to fail.
// Examples:
// natural shape: [4, 5, 6]
// shape: -1
// output shape: [4, 5, 6]
//
// natural shape: [4, 5, 6]
// shape: [3, -1, 2]
// output shape: [3, 5, 2]
//
// natural shape: [4, 5, 6]
// shape: [3, 7, 2]
// output shape: [3, 7, 2]
//
//	values: A 1D tensor representing the values of the ragged tensor.
//	default_value: The default_value when the shape is larger than the ragged tensor. The
// default_value is broadcast until it is the shape of the output tensor, and
// then overwritten by values in the ragged tensor. The default value must be
// compatible with this broadcast operation, and must have fewer dimensions than
// the value tensor.
//
//	row_partition_types: The types of the row partition tensors. At present, these can be:
// * "ROW_SPLITS": the row_splits tensor from the ragged tensor.
// * "VALUE_ROWIDS": the value_rowids tensor from the ragged tensor.
// * "FIRST_DIM_SIZE": if value_rowids is used for the first dimension, then it
//   is preceded by "FIRST_DIM_SIZE".
// The tensors are in the order of the dimensions.
//
// Returns The resulting dense tensor.
func RaggedTensorToTensor(scope *Scope, shape tf.Output, values tf.Output, default_value tf.Output, row_partition_tensors []tf.Output, row_partition_types []string) (result tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"row_partition_types": row_partition_types}
	opspec := tf.OpSpec{
		Type: "RaggedTensorToTensor",
		Input: []tf.Input{
			shape, values, default_value, tf.OutputList(row_partition_tensors),
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
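
// A sketch of densifying the ragged value [[1, 2], [], [3]], whose ROW_SPLITS
// partition is [0, 2, 2, 3]; with shape [-1, -1] the natural shape [3, 2] is
// used. It assumes the NewScope and Const helpers from this package:
//
// ```go
// s := op.NewScope()
// shape := op.Const(s, []int64{-1, -1})
// values := op.Const(s, []int32{1, 2, 3})
// defaultValue := op.Const(s, int32(0))
// rowSplits := op.Const(s, []int64{0, 2, 2, 3})
// dense := op.RaggedTensorToTensor(s, shape, values, defaultValue,
// 	[]tf.Output{rowSplits}, []string{"ROW_SPLITS"})
// // dense evaluates to [[1, 2], [0, 0], [3, 0]]
// ```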

// LRNGradAttr is an optional argument to LRNGrad.
type LRNGradAttr func(optionalAttr)

// LRNGradDepthRadius sets the optional depth_radius attribute to value.
//
// value: A depth radius.
// If not specified, defaults to 5
func LRNGradDepthRadius(value int64) LRNGradAttr {
	return func(m optionalAttr) {
		m["depth_radius"] = value
	}
}

// LRNGradBias sets the optional bias attribute to value.
//
// value: An offset (usually > 0 to avoid dividing by 0).
// If not specified, defaults to 1
func LRNGradBias(value float32) LRNGradAttr {
	return func(m optionalAttr) {
		m["bias"] = value
	}
}

// LRNGradAlpha sets the optional alpha attribute to value.
//
// value: A scale factor, usually positive.
// If not specified, defaults to 1
func LRNGradAlpha(value float32) LRNGradAttr {
	return func(m optionalAttr) {
		m["alpha"] = value
	}
}

// LRNGradBeta sets the optional beta attribute to value.
//
// value: An exponent.
// If not specified, defaults to 0.5
func LRNGradBeta(value float32) LRNGradAttr {
	return func(m optionalAttr) {
		m["beta"] = value
	}
}

// Gradients for Local Response Normalization.
//
// Arguments:
//	input_grads: 4-D with shape `[batch, height, width, channels]`.
//	input_image: 4-D with shape `[batch, height, width, channels]`.
//	output_image: 4-D with shape `[batch, height, width, channels]`.
//
// Returns The gradients for LRN.
func LRNGrad(scope *Scope, input_grads tf.Output, input_image tf.Output, output_image tf.Output, optional ...LRNGradAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "LRNGrad",
		Input: []tf.Input{
			input_grads, input_image, output_image,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// PrelinearizeAttr is an optional argument to Prelinearize.
type PrelinearizeAttr func(optionalAttr)

// PrelinearizeShape sets the optional shape attribute to value.
//
// value: The shape of the tensor.
// If not specified, defaults to <>
func PrelinearizeShape(value tf.Shape) PrelinearizeAttr {
	return func(m optionalAttr) {
		m["shape"] = value
	}
}

// PrelinearizeLayout sets the optional layout attribute to value.
//
// value: A vector holding the requested layout in minor-to-major sequence. If a layout
// attribute is passed but its values are all -1 the layout will be computed by
// the infeed operation.
// If not specified, defaults to <>
func PrelinearizeLayout(value []int64) PrelinearizeAttr {
	return func(m optionalAttr) {
		m["layout"] = value
	}
}

// An op which linearizes one Tensor value to an opaque variant tensor.
//
// Arguments:
//	input: A tensor that will be linearized.
func Prelinearize(scope *Scope, input tf.Output, optional ...PrelinearizeAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "Prelinearize",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// StatefulUniformFullIntAttr is an optional argument to StatefulUniformFullInt.
type StatefulUniformFullIntAttr func(optionalAttr)

// StatefulUniformFullIntDtype sets the optional dtype attribute to value.
//
// value: The type of the output.
// If not specified, defaults to DT_UINT64
func StatefulUniformFullIntDtype(value tf.DataType) StatefulUniformFullIntAttr {
	return func(m optionalAttr) {
		m["dtype"] = value
	}
}

// Outputs random integers from a uniform distribution.
//
// The generated values are uniform integers covering the whole range of `dtype`.
//
// Arguments:
//	resource: The handle of the resource variable that stores the state of the RNG.
//	algorithm: The RNG algorithm.
//	shape: The shape of the output tensor.
//
// Returns Random values with specified shape.
func StatefulUniformFullInt(scope *Scope, resource tf.Output, algorithm tf.Output, shape tf.Output, optional ...StatefulUniformFullIntAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "StatefulUniformFullInt",
		Input: []tf.Input{
			resource, algorithm, shape,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Transforms a Tensor into a serialized TensorProto proto.
//
// Arguments:
//	tensor: A Tensor of type `T`.
//
// Returns A serialized TensorProto proto of the input tensor.
func SerializeTensor(scope *Scope, tensor tf.Output) (serialized tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "SerializeTensor",
		Input: []tf.Input{
			tensor,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Computes the sparse Cholesky decomposition of `input`.
//
// Computes the Sparse Cholesky decomposition of a sparse matrix, with the given
// fill-in reducing permutation.
//
39045// have compatible shapes. If the sparse matrix has rank 3; with the batch
39046// dimension `B`, then the `permutation` must be of rank 2; with the same batch
39047// dimension `B`. There is no support for broadcasting.
39048//
//
// Furthermore, each component vector of `permutation` must be of length `N`,
// containing each of the integers {0, 1, ..., N - 1} exactly once, where `N` is
// the number of rows of each component of the sparse matrix.
//
// Each component of the input sparse matrix must represent a symmetric positive
// definite (SPD) matrix; although only the lower triangular part of the matrix is
// read. If any individual component is not SPD, then an InvalidArgument error is
// thrown.
//
// The returned sparse matrix has the same dense shape as the input sparse matrix.
// For each component `A` of the input sparse matrix, the corresponding output
// sparse matrix represents `L`, the lower triangular Cholesky factor satisfying
// the following identity:
//
// ```
//   A = L * Lt
// ```
//
// where Lt denotes the transpose of L (or its conjugate transpose, if `type` is
// `complex64` or `complex128`).
//
// The `type` parameter denotes the type of the matrix elements. The supported
// types are: `float32`, `float64`, `complex64` and `complex128`.
//
// Usage example:
//
// ```python
//     from tensorflow.python.ops.linalg.sparse import sparse_csr_matrix_ops
//
//     a_indices = np.array([[0, 0], [1, 1], [2, 1], [2, 2], [3, 3]])
//     a_values = np.array([1.0, 2.0, 1.0, 3.0, 4.0], np.float32)
//     a_dense_shape = [4, 4]
//
//     with tf.Session() as sess:
//       # Define (COO format) SparseTensor over Numpy array.
//       a_st = tf.sparse.SparseTensor(a_indices, a_values, a_dense_shape)
//
//       # Convert SparseTensors to CSR SparseMatrix.
//       a_sm = sparse_csr_matrix_ops.sparse_tensor_to_csr_sparse_matrix(
//           a_st.indices, a_st.values, a_st.dense_shape)
//
//       # Obtain the Sparse Cholesky factor using AMD Ordering for reducing zero
//       # fill-in (number of structural non-zeros in the sparse Cholesky factor).
//       ordering_amd = sparse_csr_matrix_ops.sparse_matrix_ordering_amd(sparse_matrix)
//       cholesky_sparse_matrices = (
//           sparse_csr_matrix_ops.sparse_matrix_sparse_cholesky(
//               sparse_matrix, ordering_amd, type=tf.float32))
//
//       # Convert the CSRSparseMatrix Cholesky factor to a dense Tensor
//       dense_cholesky = sparse_csr_matrix_ops.csr_sparse_matrix_to_dense(
//           cholesky_sparse_matrices, tf.float32)
//
//       # Evaluate the dense Tensor value.
//       dense_cholesky_value = sess.run(dense_cholesky)
// ```
//
// `dense_cholesky_value` stores the dense Cholesky factor:
//
// ```
//     [[  1.  0.    0.    0.]
//      [  0.  1.41  0.    0.]
//      [  0.  0.70  1.58  0.]
//      [  0.  0.    0.    2.]]
// ```
//
// Arguments:
//	input: A `CSRSparseMatrix`.
//	permutation: A fill-in reducing permutation matrix.
//	type: The type of `input`.
//
// Returns The sparse Cholesky decomposition of `input`.
func SparseMatrixSparseCholesky(scope *Scope, input tf.Output, permutation tf.Output, type_ tf.DataType) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"type": type_}
	opspec := tf.OpSpec{
		Type: "SparseMatrixSparseCholesky",
		Input: []tf.Input{
			input, permutation,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// StatefulTruncatedNormalAttr is an optional argument to StatefulTruncatedNormal.
type StatefulTruncatedNormalAttr func(optionalAttr)

// StatefulTruncatedNormalDtype sets the optional dtype attribute to value.
//
// value: The type of the output.
// If not specified, defaults to DT_FLOAT
func StatefulTruncatedNormalDtype(value tf.DataType) StatefulTruncatedNormalAttr {
	return func(m optionalAttr) {
		m["dtype"] = value
	}
}

// Outputs random values from a truncated normal distribution.
//
// The generated values follow a normal distribution with mean 0 and standard
// deviation 1, except that values whose magnitude is more than 2 standard
// deviations from the mean are dropped and re-picked.
//
// Arguments:
//	resource: The handle of the resource variable that stores the state of the RNG.
//	algorithm: The RNG algorithm.
//	shape: The shape of the output tensor.
//
// Returns Random values with specified shape.
func StatefulTruncatedNormal(scope *Scope, resource tf.Output, algorithm tf.Output, shape tf.Output, optional ...StatefulTruncatedNormalAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "StatefulTruncatedNormal",
		Input: []tf.Input{
			resource, algorithm, shape,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// MeanAttr is an optional argument to Mean.
type MeanAttr func(optionalAttr)

// MeanKeepDims sets the optional keep_dims attribute to value.
//
// value: If true, retain reduced dimensions with length 1.
// If not specified, defaults to false
func MeanKeepDims(value bool) MeanAttr {
	return func(m optionalAttr) {
		m["keep_dims"] = value
	}
}

// Computes the mean of elements across dimensions of a tensor.
//
// Reduces `input` along the dimensions given in `axis`. Unless
// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
// `axis`. If `keep_dims` is true, the reduced dimensions are
// retained with length 1.
//
// Arguments:
//	input: The tensor to reduce.
//	axis: The dimensions to reduce. Must be in the range
// `[-rank(input), rank(input))`.
//
// Returns The reduced tensor.
func Mean(scope *Scope, input tf.Output, axis tf.Output, optional ...MeanAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "Mean",
		Input: []tf.Input{
			input, axis,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
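
// A sketch of a mean reduction that keeps the reduced axis, assuming the
// NewScope and Const helpers from this package:
//
// ```go
// s := op.NewScope()
// x := op.Const(s, [][]float32{{1, 2}, {3, 4}})
// axis := op.Const(s, []int32{1})
// m := op.Mean(s, x, axis, op.MeanKeepDims(true)) // shape [2, 1]: [[1.5], [3.5]]
// ```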

// PaddingFIFOQueueV2Attr is an optional argument to PaddingFIFOQueueV2.
type PaddingFIFOQueueV2Attr func(optionalAttr)

// PaddingFIFOQueueV2Shapes sets the optional shapes attribute to value.
//
// value: The shape of each component in a value. The length of this attr must
// be either 0 or the same as the length of component_types.
// Shapes of fixed rank but variable size are allowed by setting
// any shape dimension to -1.  In this case, the inputs' shape may vary along
// the given dimension, and DequeueMany will pad the given dimension with
// zeros up to the maximum shape of all elements in the given batch.
// If the length of this attr is 0, different queue elements may have
// different ranks and shapes, but only one element may be dequeued at a time.
// If not specified, defaults to <>
//
// REQUIRES: len(value) >= 0
func PaddingFIFOQueueV2Shapes(value []tf.Shape) PaddingFIFOQueueV2Attr {
	return func(m optionalAttr) {
		m["shapes"] = value
	}
}

// PaddingFIFOQueueV2Capacity sets the optional capacity attribute to value.
//
// value: The upper bound on the number of elements in this queue.
// Negative numbers mean no limit.
// If not specified, defaults to -1
func PaddingFIFOQueueV2Capacity(value int64) PaddingFIFOQueueV2Attr {
	return func(m optionalAttr) {
		m["capacity"] = value
	}
}

// PaddingFIFOQueueV2Container sets the optional container attribute to value.
//
// value: If non-empty, this queue is placed in the given container.
// Otherwise, a default container is used.
// If not specified, defaults to ""
func PaddingFIFOQueueV2Container(value string) PaddingFIFOQueueV2Attr {
	return func(m optionalAttr) {
		m["container"] = value
	}
}

// PaddingFIFOQueueV2SharedName sets the optional shared_name attribute to value.
//
// value: If non-empty, this queue will be shared under the given name
// across multiple sessions.
// If not specified, defaults to ""
func PaddingFIFOQueueV2SharedName(value string) PaddingFIFOQueueV2Attr {
	return func(m optionalAttr) {
		m["shared_name"] = value
	}
}

// A queue that produces elements in first-in first-out order.
//
// Variable-size shapes are allowed by setting the corresponding shape dimensions
// to -1 in the shapes attr.  In this case DequeueMany will pad up to the maximum
// size of any given element in the minibatch.  See the `shapes` attribute for details.
//
// Arguments:
//	component_types: The type of each component in a value.
//
// Returns The handle to the queue.
func PaddingFIFOQueueV2(scope *Scope, component_types []tf.DataType, optional ...PaddingFIFOQueueV2Attr) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"component_types": component_types}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "PaddingFIFOQueueV2",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
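
// A sketch of creating a padding FIFO queue whose single int64 component has a
// variable-length first dimension (padded on DequeueMany). It assumes the
// NewScope helper from this package and tf.MakeShape from the parent package:
//
// ```go
// s := op.NewScope()
// queue := op.PaddingFIFOQueueV2(s,
// 	[]tf.DataType{tf.Int64},
// 	op.PaddingFIFOQueueV2Shapes([]tf.Shape{tf.MakeShape(-1)}),
// 	op.PaddingFIFOQueueV2Capacity(32))
// ```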

// Returns true if queue is closed.
//
// This operation returns true if the queue is closed and false if the queue
// is open.
//
// Arguments:
//	handle: The handle to a queue.
func QueueIsClosedV2(scope *Scope, handle tf.Output) (is_closed tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "QueueIsClosedV2",
		Input: []tf.Input{
			handle,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Checks whether a quantile stream has been initialized.
//
// An Op that checks if quantile stream resource is initialized.
//
// Arguments:
//	quantile_stream_resource_handle: resource; The reference to quantile stream resource handle.
//
// Returns bool; True if the resource is initialized, False otherwise.
func IsBoostedTreesQuantileStreamResourceInitialized(scope *Scope, quantile_stream_resource_handle tf.Output) (is_initialized tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "IsBoostedTreesQuantileStreamResourceInitialized",
		Input: []tf.Input{
			quantile_stream_resource_handle,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Applies softmax to a batched N-D `SparseTensor`.
//
// The inputs represent an N-D SparseTensor with logical shape `[..., B, C]`
// (where `N >= 2`), and with indices sorted in the canonical lexicographic order.
//
// This op is equivalent to applying the normal `tf.nn.softmax()` to each innermost
// logical submatrix with shape `[B, C]`, but with the catch that *the implicitly
// zero elements do not participate*.  Specifically, the algorithm is equivalent
// to the following:
//
//   (1) Applies `tf.nn.softmax()` to a densified view of each innermost submatrix
//       with shape `[B, C]`, along the size-C dimension;
//   (2) Masks out the original implicitly-zero locations;
//   (3) Renormalizes the remaining elements.
//
// Hence, the `SparseTensor` result has exactly the same non-zero indices and
// shape.
//
// Arguments:
//	sp_indices: 2-D.  `NNZ x R` matrix with the indices of non-empty values in a
// SparseTensor, in canonical ordering.
//	sp_values: 1-D.  `NNZ` non-empty values corresponding to `sp_indices`.
//	sp_shape: 1-D.  Shape of the input SparseTensor.
//
// Returns 1-D.  The `NNZ` values for the result `SparseTensor`.
func SparseSoftmax(scope *Scope, sp_indices tf.Output, sp_values tf.Output, sp_shape tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "SparseSoftmax",
		Input: []tf.Input{
			sp_indices, sp_values, sp_shape,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// An Op to permute tensors across replicated TPU instances.
//
// Each instance supplies its own input.
//
// For example, suppose there are 4 TPU instances: `[A, B, C, D]`. Passing
// source_target_pairs=`[[0,1],[1,2],[2,3],[3,0]]` gets the outputs:
// `[D, A, B, C]`.
//
// Arguments:
//	input: The local input to be permuted. Currently only supports float and
// bfloat16.
//	source_target_pairs: A tensor with shape [num_pairs, 2].
//
// Returns The permuted input.
func CollectivePermute(scope *Scope, input tf.Output, source_target_pairs tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "CollectivePermute",
		Input: []tf.Input{
			input, source_target_pairs,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
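
// A sketch matching the 4-instance example above, assuming the NewScope and
// Const helpers from this package; `input` stands for a per-replica tf.Output
// built elsewhere:
//
// ```go
// s := op.NewScope()
// pairs := op.Const(s, [][]int32{{0, 1}, {1, 2}, {2, 3}, {3, 0}})
// permuted := op.CollectivePermute(s, input, pairs)
// ```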

// StringToNumberAttr is an optional argument to StringToNumber.
type StringToNumberAttr func(optionalAttr)

// StringToNumberOutType sets the optional out_type attribute to value.
//
// value: The numeric type to interpret each string in `string_tensor` as.
// If not specified, defaults to DT_FLOAT
func StringToNumberOutType(value tf.DataType) StringToNumberAttr {
	return func(m optionalAttr) {
		m["out_type"] = value
	}
}

// Converts each string in the input Tensor to the specified numeric type.
//
// (Note that int32 overflow results in an error while float overflow
// results in a rounded value.)
//
// Example:
//
// >>> strings = ["5.0", "3.0", "7.0"]
// >>> tf.strings.to_number(strings)
// <tf.Tensor: shape=(3,), dtype=float32, numpy=array([5., 3., 7.], dtype=float32)>
//
//
// Returns A Tensor of the same shape as the input `string_tensor`.
func StringToNumber(scope *Scope, string_tensor tf.Output, optional ...StringToNumberAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "StringToNumber",
		Input: []tf.Input{
			string_tensor,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
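
// A sketch requesting int32 output via the optional out_type attribute,
// assuming the NewScope and Const helpers from this package:
//
// ```go
// s := op.NewScope()
// strs := op.Const(s, []string{"5", "3", "7"})
// nums := op.StringToNumber(s, strs, op.StringToNumberOutType(tf.Int32))
// ```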
39466
39467// Fast Fourier transform.
39468//
39469// Computes the 1-dimensional discrete Fourier transform over the inner-most
39470// dimension of `input`.
39471//
39472// Arguments:
39473//	input: A complex tensor.
39474//
39475// Returns A complex tensor of the same shape as `input`. The inner-most
39476//   dimension of `input` is replaced with its 1D Fourier transform.
39477//
39478// @compatibility(numpy)
39479// Equivalent to np.fft.fft
39480// @end_compatibility
39481func FFT(scope *Scope, input tf.Output) (output tf.Output) {
39482	if scope.Err() != nil {
39483		return
39484	}
	opspec := tf.OpSpec{
		Type: "FFT",
		Input: []tf.Input{
			input,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// RetrieveTPUEmbeddingAdagradParametersGradAccumDebugAttr is an optional argument to RetrieveTPUEmbeddingAdagradParametersGradAccumDebug.
type RetrieveTPUEmbeddingAdagradParametersGradAccumDebugAttr func(optionalAttr)

// RetrieveTPUEmbeddingAdagradParametersGradAccumDebugTableId sets the optional table_id attribute to value.
// If not specified, defaults to -1
func RetrieveTPUEmbeddingAdagradParametersGradAccumDebugTableId(value int64) RetrieveTPUEmbeddingAdagradParametersGradAccumDebugAttr {
	return func(m optionalAttr) {
		m["table_id"] = value
	}
}

// RetrieveTPUEmbeddingAdagradParametersGradAccumDebugTableName sets the optional table_name attribute to value.
// If not specified, defaults to ""
func RetrieveTPUEmbeddingAdagradParametersGradAccumDebugTableName(value string) RetrieveTPUEmbeddingAdagradParametersGradAccumDebugAttr {
	return func(m optionalAttr) {
		m["table_name"] = value
	}
}

// RetrieveTPUEmbeddingAdagradParametersGradAccumDebugConfig sets the optional config attribute to value.
// If not specified, defaults to ""
func RetrieveTPUEmbeddingAdagradParametersGradAccumDebugConfig(value string) RetrieveTPUEmbeddingAdagradParametersGradAccumDebugAttr {
	return func(m optionalAttr) {
		m["config"] = value
	}
}

// Retrieve Adagrad embedding parameters with debug support.
//
// An op that retrieves optimization parameters from embedding to host
// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
// the correct embedding table configuration. For example, this op is
// used to retrieve updated parameters before saving a checkpoint.
//
// Returns:
//	parameters: Parameter parameters updated by the Adagrad optimization algorithm.
//	accumulators: Parameter accumulators updated by the Adagrad optimization algorithm.
//	gradient_accumulators: Parameter gradient_accumulators updated by the Adagrad optimization algorithm.
func RetrieveTPUEmbeddingAdagradParametersGradAccumDebug(scope *Scope, num_shards int64, shard_id int64, optional ...RetrieveTPUEmbeddingAdagradParametersGradAccumDebugAttr) (parameters tf.Output, accumulators tf.Output, gradient_accumulators tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "RetrieveTPUEmbeddingAdagradParametersGradAccumDebug",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}

// StatelessMultinomialAttr is an optional argument to StatelessMultinomial.
type StatelessMultinomialAttr func(optionalAttr)

// StatelessMultinomialOutputDtype sets the optional output_dtype attribute to value.
// If not specified, defaults to DT_INT64
func StatelessMultinomialOutputDtype(value tf.DataType) StatelessMultinomialAttr {
	return func(m optionalAttr) {
		m["output_dtype"] = value
	}
}

// Draws samples from a multinomial distribution.
//
// Arguments:
//	logits: 2-D Tensor with shape `[batch_size, num_classes]`.  Each slice `[i, :]`
// represents the unnormalized log probabilities for all classes.
//	num_samples: 0-D.  Number of independent samples to draw for each row slice.
//	seed: 2 seeds (shape [2]).
//
// Returns 2-D Tensor with shape `[batch_size, num_samples]`.  Each slice `[i, :]`
// contains the drawn class labels with range `[0, num_classes)`.
func StatelessMultinomial(scope *Scope, logits tf.Output, num_samples tf.Output, seed tf.Output, optional ...StatelessMultinomialAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "StatelessMultinomial",
		Input: []tf.Input{
			logits, num_samples, seed,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
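
// Example (illustrative sketch only, not part of the generated API): drawing
// class samples with StatelessMultinomial. This assumes the usual pattern for
// this package: build a graph via a Scope, then run it with a tf.Session from
// the tensorflow/go package.
//
// ```go
// sc := op.NewScope()
// logits := op.Const(sc, [][]float32{{1, 1, 1, 1}}) // batch_size=1, num_classes=4
// numSamples := op.Const(sc, int32(3))              // three samples per row
// seed := op.Const(sc, []int64{7, 11})              // 2 seeds (shape [2])
// samples := op.StatelessMultinomial(sc, logits, numSamples, seed)
// graph, err := sc.Finalize()
// // ... create a tf.Session on graph and fetch `samples`; the same seed
// // always yields the same drawn labels.
// ```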

// QuantizedMatMulWithBiasAndReluAttr is an optional argument to QuantizedMatMulWithBiasAndRelu.
type QuantizedMatMulWithBiasAndReluAttr func(optionalAttr)

// QuantizedMatMulWithBiasAndReluToutput sets the optional Toutput attribute to value.
// If not specified, defaults to DT_QINT32
func QuantizedMatMulWithBiasAndReluToutput(value tf.DataType) QuantizedMatMulWithBiasAndReluAttr {
	return func(m optionalAttr) {
		m["Toutput"] = value
	}
}

// QuantizedMatMulWithBiasAndReluTransposeA sets the optional transpose_a attribute to value.
//
// value: If true, `a` is transposed before multiplication.
// If not specified, defaults to false
func QuantizedMatMulWithBiasAndReluTransposeA(value bool) QuantizedMatMulWithBiasAndReluAttr {
	return func(m optionalAttr) {
		m["transpose_a"] = value
	}
}

// QuantizedMatMulWithBiasAndReluTransposeB sets the optional transpose_b attribute to value.
//
// value: If true, `b` is transposed before multiplication.
// If not specified, defaults to false
func QuantizedMatMulWithBiasAndReluTransposeB(value bool) QuantizedMatMulWithBiasAndReluAttr {
	return func(m optionalAttr) {
		m["transpose_b"] = value
	}
}

// QuantizedMatMulWithBiasAndReluInputQuantMode sets the optional input_quant_mode attribute to value.
//
// value: Input data quantization mode. Either MIN_FIRST (default) or SCALED.
// If not specified, defaults to "MIN_FIRST"
func QuantizedMatMulWithBiasAndReluInputQuantMode(value string) QuantizedMatMulWithBiasAndReluAttr {
	return func(m optionalAttr) {
		m["input_quant_mode"] = value
	}
}

// Performs a quantized matrix multiplication of `a` by the matrix `b`, with bias
// add and relu fusion.
//
// The inputs must be two-dimensional matrices and a 1D bias vector, and the inner
// dimension of `a` (after being transposed if `transpose_a` is non-zero) must
// match the outer dimension of `b` (after being transposed if `transpose_b` is
// non-zero). A broadcast add of the bias values is then applied to the matrix
// multiplication result; the bias size must match the inner dimension of `b`.
// Finally, a relu activation is applied to produce a non-negative result.
//
// Arguments:
//	a: A matrix to be multiplied. Must be a two-dimensional tensor of type `quint8`.
//	b: A matrix to be multiplied. Must be a two-dimensional tensor of type `qint8`.
//	bias: A 1D bias tensor whose size matches the inner dimension of `b` (after being
// transposed if `transpose_b` is non-zero).
//	min_a: The float value that the lowest quantized `a` value represents.
//	max_a: The float value that the highest quantized `a` value represents.
//	min_b: The float value that the lowest quantized `b` value represents.
//	max_b: The float value that the highest quantized `b` value represents.
//
// Returns:
//	out
//	min_out: The float value that the lowest quantized output value represents.
//	max_out: The float value that the highest quantized output value represents.
func QuantizedMatMulWithBiasAndRelu(scope *Scope, a tf.Output, b tf.Output, bias tf.Output, min_a tf.Output, max_a tf.Output, min_b tf.Output, max_b tf.Output, optional ...QuantizedMatMulWithBiasAndReluAttr) (out tf.Output, min_out tf.Output, max_out tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "QuantizedMatMulWithBiasAndRelu",
		Input: []tf.Input{
			a, b, bias, min_a, max_a, min_b, max_b,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}

// RetrieveTPUEmbeddingMomentumParametersGradAccumDebugAttr is an optional argument to RetrieveTPUEmbeddingMomentumParametersGradAccumDebug.
type RetrieveTPUEmbeddingMomentumParametersGradAccumDebugAttr func(optionalAttr)

// RetrieveTPUEmbeddingMomentumParametersGradAccumDebugTableId sets the optional table_id attribute to value.
// If not specified, defaults to -1
func RetrieveTPUEmbeddingMomentumParametersGradAccumDebugTableId(value int64) RetrieveTPUEmbeddingMomentumParametersGradAccumDebugAttr {
	return func(m optionalAttr) {
		m["table_id"] = value
	}
}

// RetrieveTPUEmbeddingMomentumParametersGradAccumDebugTableName sets the optional table_name attribute to value.
// If not specified, defaults to ""
func RetrieveTPUEmbeddingMomentumParametersGradAccumDebugTableName(value string) RetrieveTPUEmbeddingMomentumParametersGradAccumDebugAttr {
	return func(m optionalAttr) {
		m["table_name"] = value
	}
}

// RetrieveTPUEmbeddingMomentumParametersGradAccumDebugConfig sets the optional config attribute to value.
// If not specified, defaults to ""
func RetrieveTPUEmbeddingMomentumParametersGradAccumDebugConfig(value string) RetrieveTPUEmbeddingMomentumParametersGradAccumDebugAttr {
	return func(m optionalAttr) {
		m["config"] = value
	}
}

// Retrieve Momentum embedding parameters with debug support.
//
// An op that retrieves optimization parameters from embedding to host
// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
// the correct embedding table configuration. For example, this op is
// used to retrieve updated parameters before saving a checkpoint.
//
// Returns:
//	parameters: Parameter parameters updated by the Momentum optimization algorithm.
//	momenta: Parameter momenta updated by the Momentum optimization algorithm.
//	gradient_accumulators: Parameter gradient_accumulators updated by the Momentum optimization algorithm.
func RetrieveTPUEmbeddingMomentumParametersGradAccumDebug(scope *Scope, num_shards int64, shard_id int64, optional ...RetrieveTPUEmbeddingMomentumParametersGradAccumDebugAttr) (parameters tf.Output, momenta tf.Output, gradient_accumulators tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "RetrieveTPUEmbeddingMomentumParametersGradAccumDebug",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}

// ResourceApplyRMSPropAttr is an optional argument to ResourceApplyRMSProp.
type ResourceApplyRMSPropAttr func(optionalAttr)

// ResourceApplyRMSPropUseLocking sets the optional use_locking attribute to value.
//
// value: If `True`, updating of the var, ms, and mom tensors is protected
// by a lock; otherwise the behavior is undefined, but may exhibit less
// contention.
// If not specified, defaults to false
func ResourceApplyRMSPropUseLocking(value bool) ResourceApplyRMSPropAttr {
	return func(m optionalAttr) {
		m["use_locking"] = value
	}
}

// Update '*var' according to the RMSProp algorithm.
//
// Note that in dense implementation of this algorithm, ms and mom will
// update even if the grad is zero, but in this sparse implementation, ms
// and mom will not update in iterations during which the grad is zero.
//
// mean_square = decay * mean_square + (1-decay) * gradient ** 2
// Delta = learning_rate * gradient / sqrt(mean_square + epsilon)
//
// ms <- rho * ms_{t-1} + (1-rho) * grad * grad
// mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)
// var <- var - mom
//
// Arguments:
//	var_: Should be from a Variable().
//	ms: Should be from a Variable().
//	mom: Should be from a Variable().
//	lr: Scaling factor. Must be a scalar.
//	rho: Decay rate. Must be a scalar.
//	momentum: Momentum scale. Must be a scalar.
//	epsilon: Ridge term. Must be a scalar.
//	grad: The gradient.
//
// Returns the created operation.
func ResourceApplyRMSProp(scope *Scope, var_ tf.Output, ms tf.Output, mom tf.Output, lr tf.Output, rho tf.Output, momentum tf.Output, epsilon tf.Output, grad tf.Output, optional ...ResourceApplyRMSPropAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ResourceApplyRMSProp",
		Input: []tf.Input{
			var_, ms, mom, lr, rho, momentum, epsilon, grad,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}
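
// To make the update rule above concrete, here is a minimal plain-Go sketch of
// the same arithmetic on slices (illustration only; the op performs this update
// in-place on the resource variables on-device). Assumes `import "math"`.
//
// ```go
// func rmspropStep(v, ms, mom, grad []float64, lr, rho, momentum, epsilon float64) {
// 	for i := range v {
// 		ms[i] = rho*ms[i] + (1-rho)*grad[i]*grad[i] // ms <- rho*ms + (1-rho)*grad^2
// 		mom[i] = momentum*mom[i] + lr*grad[i]/math.Sqrt(ms[i]+epsilon)
// 		v[i] -= mom[i] // var <- var - mom
// 	}
// }
// ```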

// MaxPool3DGradAttr is an optional argument to MaxPool3DGrad.
type MaxPool3DGradAttr func(optionalAttr)

// MaxPool3DGradDataFormat sets the optional data_format attribute to value.
//
// value: The data format of the input and output data. With the
// default format "NDHWC", the data is stored in the order of:
//     [batch, in_depth, in_height, in_width, in_channels].
// Alternatively, the format could be "NCDHW", the data storage order is:
//     [batch, in_channels, in_depth, in_height, in_width].
// If not specified, defaults to "NDHWC"
func MaxPool3DGradDataFormat(value string) MaxPool3DGradAttr {
	return func(m optionalAttr) {
		m["data_format"] = value
	}
}

// Computes gradients of 3D max pooling function.
//
// Arguments:
//	orig_input: The original input tensor.
//	orig_output: The original output tensor.
//	grad: Output backprop of shape `[batch, depth, rows, cols, channels]`.
//	ksize: 1-D tensor of length 5. The size of the window for each dimension of
// the input tensor. Must have `ksize[0] = ksize[4] = 1`.
//	strides: 1-D tensor of length 5. The stride of the sliding window for each
// dimension of `input`. Must have `strides[0] = strides[4] = 1`.
//	padding: The type of padding algorithm to use.
func MaxPool3DGrad(scope *Scope, orig_input tf.Output, orig_output tf.Output, grad tf.Output, ksize []int64, strides []int64, padding string, optional ...MaxPool3DGradAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "MaxPool3DGrad",
		Input: []tf.Input{
			orig_input, orig_output, grad,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Creates a dataset that executes a SQL query and emits rows of the result set.
//
// Arguments:
//	driver_name: The database type. Currently, the only supported type is 'sqlite'.
//	data_source_name: A connection string to connect to the database.
//	query: A SQL query to execute.
//
//
func SqlDataset(scope *Scope, driver_name tf.Output, data_source_name tf.Output, query tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
	opspec := tf.OpSpec{
		Type: "SqlDataset",
		Input: []tf.Input{
			driver_name, data_source_name, query,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
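
// A minimal usage sketch (illustrative only; the file name and query below are
// hypothetical):
//
// ```go
// sc := op.NewScope()
// driver := op.Const(sc, "sqlite")                     // only supported driver
// dsn := op.Const(sc, "users.sqlite3")                 // connection string
// query := op.Const(sc, "SELECT name, age FROM users")
// handle := op.SqlDataset(sc, driver, dsn, query,
// 	[]tf.DataType{tf.String, tf.Int32},             // output_types, one per column
// 	[]tf.Shape{tf.ScalarShape(), tf.ScalarShape()}) // output_shapes
// _ = handle
// ```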

// Outputs deterministic pseudorandom integers from a uniform distribution.
//
// The generated values follow a uniform distribution in the range `[minval, maxval)`.
//
// The outputs are a deterministic function of `shape`, `seed`, `minval`, and `maxval`.
//
// Arguments:
//	shape: The shape of the output tensor.
//	seed: 2 seeds (shape [2]).
//	minval: Minimum value (inclusive, scalar).
//	maxval: Maximum value (exclusive, scalar).
//
// Returns Random values with specified shape.
func StatelessRandomUniformInt(scope *Scope, shape tf.Output, seed tf.Output, minval tf.Output, maxval tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "StatelessRandomUniformInt",
		Input: []tf.Input{
			shape, seed, minval, maxval,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
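
// Determinism sketch (illustrative): because the output is a pure function of
// its inputs, rebuilding and rerunning this graph always yields the same values.
//
// ```go
// sc := op.NewScope()
// shape := op.Const(sc, []int64{4})
// seed := op.Const(sc, []int64{1, 2})
// lo := op.Const(sc, int64(0))  // minval, inclusive
// hi := op.Const(sc, int64(10)) // maxval, exclusive
// ints := op.StatelessRandomUniformInt(sc, shape, seed, lo, hi) // 4 values in [0, 10)
// _ = ints
// ```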

// Returns a copy of the input tensor.
func Snapshot(scope *Scope, input tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Snapshot",
		Input: []tf.Input{
			input,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Adds up a `SparseTensor` and a dense `Tensor`, producing a dense `Tensor`.
//
// This Op does not require `a_indices` be sorted in standard lexicographic order.
//
// Arguments:
//	a_indices: 2-D.  The `indices` of the `SparseTensor`, with shape `[nnz, ndims]`.
//	a_values: 1-D.  The `values` of the `SparseTensor`, with shape `[nnz]`.
//	a_shape: 1-D.  The `shape` of the `SparseTensor`, with shape `[ndims]`.
//	b: `ndims`-D Tensor.  With shape `a_shape`.
func SparseTensorDenseAdd(scope *Scope, a_indices tf.Output, a_values tf.Output, a_shape tf.Output, b tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "SparseTensorDenseAdd",
		Input: []tf.Input{
			a_indices, a_values, a_shape, b,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
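
// Worked example (illustrative): a 2x2 `SparseTensor` with a single nonzero at
// [0, 1] added to a dense matrix.
//
// ```go
// sc := op.NewScope()
// aIndices := op.Const(sc, [][]int64{{0, 1}}) // one nonzero, at row 0, col 1
// aValues := op.Const(sc, []float32{10})
// aShape := op.Const(sc, []int64{2, 2})
// b := op.Const(sc, [][]float32{{1, 2}, {3, 4}})
// sum := op.SparseTensorDenseAdd(sc, aIndices, aValues, aShape, b)
// // expected result: [[1, 12], [3, 4]]
// _ = sum
// ```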

// RaggedBincountAttr is an optional argument to RaggedBincount.
type RaggedBincountAttr func(optionalAttr)

// RaggedBincountBinaryOutput sets the optional binary_output attribute to value.
//
// value: bool; Whether the kernel should count the appearance or number of occurrences.
// If not specified, defaults to false
func RaggedBincountBinaryOutput(value bool) RaggedBincountAttr {
	return func(m optionalAttr) {
		m["binary_output"] = value
	}
}

// Counts the number of occurrences of each value in an integer array.
//
// Outputs a vector with length `size` and the same dtype as `weights`. If
// `weights` are empty, then index `i` stores the number of times the value `i` is
// counted in `arr`. If `weights` are non-empty, then index `i` stores the sum of
// the value in `weights` at each index where the corresponding value in `arr` is
// `i`.
//
// Values in `arr` outside of the range [0, size) are ignored.
//
// Arguments:
//	splits: 1D int64 `Tensor`.
//	values: 2D int `Tensor`.
//	size: non-negative int scalar `Tensor`.
//	weights: An int32, int64, float32, or float64 `Tensor` with the same
// shape as `values`, or a length-0 `Tensor`, in which case it acts as all weights
// equal to 1.
//
// Returns 1D `Tensor` with length equal to `size` or 2D `Tensor` with [batch_size, `size`].
// The counts or summed weights for each value in the range [0, size).
func RaggedBincount(scope *Scope, splits tf.Output, values tf.Output, size tf.Output, weights tf.Output, optional ...RaggedBincountAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "RaggedBincount",
		Input: []tf.Input{
			splits, values, size, weights,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
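
// Worked example of the ragged semantics described above (illustrative): with
// splits = [0, 2, 5], values = [0, 1, 2, 2, 3], size = 4 and empty weights,
// row 0 covers values[0:2] and row 1 covers values[2:5], so the expected
// output is [[1, 1, 0, 0], [0, 0, 2, 1]].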

// StatelessRandomNormalAttr is an optional argument to StatelessRandomNormal.
type StatelessRandomNormalAttr func(optionalAttr)

// StatelessRandomNormalDtype sets the optional dtype attribute to value.
//
// value: The type of the output.
// If not specified, defaults to DT_FLOAT
func StatelessRandomNormalDtype(value tf.DataType) StatelessRandomNormalAttr {
	return func(m optionalAttr) {
		m["dtype"] = value
	}
}

// Outputs deterministic pseudorandom values from a normal distribution.
//
// The generated values will have mean 0 and standard deviation 1.
//
// The outputs are a deterministic function of `shape` and `seed`.
//
// Arguments:
//	shape: The shape of the output tensor.
//	seed: 2 seeds (shape [2]).
//
// Returns Random values with specified shape.
func StatelessRandomNormal(scope *Scope, shape tf.Output, seed tf.Output, optional ...StatelessRandomNormalAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "StatelessRandomNormal",
		Input: []tf.Input{
			shape, seed,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// RetrieveTPUEmbeddingStochasticGradientDescentParametersGradAccumDebugAttr is an optional argument to RetrieveTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug.
type RetrieveTPUEmbeddingStochasticGradientDescentParametersGradAccumDebugAttr func(optionalAttr)

// RetrieveTPUEmbeddingStochasticGradientDescentParametersGradAccumDebugTableId sets the optional table_id attribute to value.
// If not specified, defaults to -1
func RetrieveTPUEmbeddingStochasticGradientDescentParametersGradAccumDebugTableId(value int64) RetrieveTPUEmbeddingStochasticGradientDescentParametersGradAccumDebugAttr {
	return func(m optionalAttr) {
		m["table_id"] = value
	}
}

// RetrieveTPUEmbeddingStochasticGradientDescentParametersGradAccumDebugTableName sets the optional table_name attribute to value.
// If not specified, defaults to ""
func RetrieveTPUEmbeddingStochasticGradientDescentParametersGradAccumDebugTableName(value string) RetrieveTPUEmbeddingStochasticGradientDescentParametersGradAccumDebugAttr {
	return func(m optionalAttr) {
		m["table_name"] = value
	}
}

// RetrieveTPUEmbeddingStochasticGradientDescentParametersGradAccumDebugConfig sets the optional config attribute to value.
// If not specified, defaults to ""
func RetrieveTPUEmbeddingStochasticGradientDescentParametersGradAccumDebugConfig(value string) RetrieveTPUEmbeddingStochasticGradientDescentParametersGradAccumDebugAttr {
	return func(m optionalAttr) {
		m["config"] = value
	}
}

// Retrieve SGD embedding parameters with debug support.
//
// An op that retrieves optimization parameters from embedding to host
// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
// the correct embedding table configuration. For example, this op is
// used to retrieve updated parameters before saving a checkpoint.
//
// Returns:
//	parameters: Parameter parameters updated by the stochastic gradient descent optimization algorithm.
//	gradient_accumulators: Parameter gradient_accumulators updated by the stochastic gradient descent optimization algorithm.
func RetrieveTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug(scope *Scope, num_shards int64, shard_id int64, optional ...RetrieveTPUEmbeddingStochasticGradientDescentParametersGradAccumDebugAttr) (parameters tf.Output, gradient_accumulators tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "RetrieveTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1)
}

// StatelessRandomUniformAttr is an optional argument to StatelessRandomUniform.
type StatelessRandomUniformAttr func(optionalAttr)

// StatelessRandomUniformDtype sets the optional dtype attribute to value.
//
// value: The type of the output.
// If not specified, defaults to DT_FLOAT
func StatelessRandomUniformDtype(value tf.DataType) StatelessRandomUniformAttr {
	return func(m optionalAttr) {
		m["dtype"] = value
	}
}

// Outputs deterministic pseudorandom values from a uniform distribution.
//
// The generated values follow a uniform distribution in the range `[0, 1)`. The
// lower bound 0 is included in the range, while the upper bound 1 is excluded.
//
// The outputs are a deterministic function of `shape` and `seed`.
//
// Arguments:
//	shape: The shape of the output tensor.
//	seed: 2 seeds (shape [2]).
//
// Returns Random values with specified shape.
func StatelessRandomUniform(scope *Scope, shape tf.Output, seed tf.Output, optional ...StatelessRandomUniformAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "StatelessRandomUniform",
		Input: []tf.Input{
			shape, seed,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
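
// End-to-end sketch (illustrative only). Assumes the usual imports:
// `tf "github.com/tensorflow/tensorflow/tensorflow/go"`, this package as `op`,
// and `fmt`.
//
// ```go
// sc := op.NewScope()
// shape := op.Const(sc, []int64{2, 3})
// seed := op.Const(sc, []int64{1, 2})
// u := op.StatelessRandomUniform(sc, shape, seed)
// graph, err := sc.Finalize()
// if err != nil {
// 	panic(err)
// }
// sess, err := tf.NewSession(graph, nil)
// if err != nil {
// 	panic(err)
// }
// res, err := sess.Run(nil, []tf.Output{u}, nil)
// if err != nil {
// 	panic(err)
// }
// fmt.Println(res[0].Value()) // 2x3 float32 values in [0, 1); same seed, same values
// ```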

// Picks the best algorithm based on device, and scrambles seed into key and counter.
//
// This op picks the best counter-based RNG algorithm based on device, and scrambles a shape-[2] seed into a key and a counter, both needed by the counter-based algorithm. The scrambling is opaque but approximately satisfies the property that different seeds result in different key/counter pairs (which will in turn result in different random numbers).
//
// Arguments:
//	seed: 2 seeds (shape [2]).
//
// Returns:
//	key: Key for the counter-based RNG algorithm (shape uint64[1]).
//	counter: Counter for the counter-based RNG algorithm. Since counter size is algorithm-dependent, this output will be right-padded with zeros to reach shape uint64[2] (the current maximal counter size among algorithms).
//	alg: The RNG algorithm (shape int32[]).
func StatelessRandomGetKeyCounterAlg(scope *Scope, seed tf.Output) (key tf.Output, counter tf.Output, alg tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "StatelessRandomGetKeyCounterAlg",
		Input: []tf.Input{
			seed,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}

// Computes the sum along sparse segments of a tensor.
//
// Read
// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)
// for an explanation of segments.
//
// Like `SegmentSum`, but `segment_ids` can have rank less than `data`'s first
// dimension, selecting a subset of dimension 0, specified by `indices`.
//
// For example:
//
// ```python
// c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])
//
// # Select two rows, one segment.
// tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 0]))
// # => [[0 0 0 0]]
//
// # Select two rows, two segments.
// tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 1]))
// # => [[ 1  2  3  4]
// #     [-1 -2 -3 -4]]
//
// # Select all rows, two segments.
// tf.sparse_segment_sum(c, tf.constant([0, 1, 2]), tf.constant([0, 0, 1]))
// # => [[0 0 0 0]
// #     [5 6 7 8]]
//
// # Which is equivalent to:
// tf.segment_sum(c, tf.constant([0, 0, 1]))
// ```
//
// Arguments:
//
//	indices: A 1-D tensor. Has same rank as `segment_ids`.
//	segment_ids: A 1-D tensor. Values should be sorted and can be repeated.
//
// Returns Has same shape as data, except for dimension 0 which
// has size `k`, the number of segments.
func SparseSegmentSum(scope *Scope, data tf.Output, indices tf.Output, segment_ids tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "SparseSegmentSum",
		Input: []tf.Input{
			data, indices, segment_ids,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// BoostedTreesUpdateEnsembleV2Attr is an optional argument to BoostedTreesUpdateEnsembleV2.
type BoostedTreesUpdateEnsembleV2Attr func(optionalAttr)

// BoostedTreesUpdateEnsembleV2LogitsDimension sets the optional logits_dimension attribute to value.
//
// value: scalar, dimension of the logits
// If not specified, defaults to 1
func BoostedTreesUpdateEnsembleV2LogitsDimension(value int64) BoostedTreesUpdateEnsembleV2Attr {
	return func(m optionalAttr) {
		m["logits_dimension"] = value
	}
}

// Updates the tree ensemble by adding a layer to the last tree being grown
//
// or by starting a new tree.
//
// Arguments:
//	tree_ensemble_handle: Handle to the ensemble variable.
//	feature_ids: Rank 1 tensor with ids for each feature. This is the real id of
// the feature that will be used in the split.
//	dimension_ids: List of rank 1 tensors representing the dimension in each feature.
//	node_ids: List of rank 1 tensors representing the nodes for which this feature
// has a split.
//	gains: List of rank 1 tensors representing the gains for each of the feature's
// split.
//	thresholds: List of rank 1 tensors representing the thresholds for each of the
// feature's split.
//	left_node_contribs: List of rank 2 tensors with left leaf contribs for each of
// the feature's splits. Will be added to the previous node values to constitute
// the values of the left nodes.
//	right_node_contribs: List of rank 2 tensors with right leaf contribs for each
// of the feature's splits. Will be added to the previous node values to constitute
// the values of the right nodes.
//	split_types: List of rank 1 tensors representing the split type for each feature.
//	max_depth: Max depth of the tree to build.
//	learning_rate: shrinkage const for each new tree.
//	pruning_mode: 0-No pruning, 1-Pre-pruning, 2-Post-pruning.
//
// Returns the created operation.
func BoostedTreesUpdateEnsembleV2(scope *Scope, tree_ensemble_handle tf.Output, feature_ids []tf.Output, dimension_ids []tf.Output, node_ids []tf.Output, gains []tf.Output, thresholds []tf.Output, left_node_contribs []tf.Output, right_node_contribs []tf.Output, split_types []tf.Output, max_depth tf.Output, learning_rate tf.Output, pruning_mode tf.Output, optional ...BoostedTreesUpdateEnsembleV2Attr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "BoostedTreesUpdateEnsembleV2",
		Input: []tf.Input{
			tree_ensemble_handle, tf.OutputList(feature_ids), tf.OutputList(dimension_ids), tf.OutputList(node_ids), tf.OutputList(gains), tf.OutputList(thresholds), tf.OutputList(left_node_contribs), tf.OutputList(right_node_contribs), tf.OutputList(split_types), max_depth, learning_rate, pruning_mode,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}

// Picks the best counter-based RNG algorithm based on device.
//
// This op picks the best counter-based RNG algorithm based on device.
//
// Returns The RNG algorithm (shape int32[]).
func StatelessRandomGetAlg(scope *Scope) (alg tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "StatelessRandomGetAlg",
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Gives a guarantee to the TF runtime that the input tensor is a constant.
//
// The runtime is then free to make optimizations based on this.
//
// Only accepts value-typed tensors as inputs and rejects resource variable handles
// as input.
//
// Returns the input tensor without modification.
func GuaranteeConst(scope *Scope, input tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "GuaranteeConst",
		Input: []tf.Input{
			input,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Transforms a tf.Example proto (as a string) into typed tensors.
//
// Arguments:
//	serialized: A vector containing a batch of binary serialized Example protos.
//	dense_defaults: A list of Tensors (some may be empty), whose length matches
// the length of `dense_keys`. dense_defaults[j] provides default values
// when the example's feature_map lacks dense_key[j].  If an empty Tensor is
// provided for dense_defaults[j], then the Feature dense_keys[j] is required.
// The input type is inferred from dense_defaults[j], even when it's empty.
// If dense_defaults[j] is not empty, and dense_shapes[j] is fully defined,
// then the shape of dense_defaults[j] must match that of dense_shapes[j].
// If dense_shapes[j] has an undefined major dimension (variable strides dense
// feature), dense_defaults[j] must contain a single element:
// the padding element.
//	num_sparse: The number of sparse features to be parsed from the example. This
// must match the lengths of `sparse_keys` and `sparse_types`.
//	sparse_keys: A list of `num_sparse` strings.
// The keys expected in the Examples' features associated with sparse values.
//	dense_keys: The keys expected in the Examples' features associated with dense
// values.
//	sparse_types: A list of `num_sparse` types; the data types of data in each
// Feature given in sparse_keys.
// Currently the ParseSingleExample op supports DT_FLOAT (FloatList),
// DT_INT64 (Int64List), and DT_STRING (BytesList).
//	dense_shapes: The shapes of data in each Feature given in dense_keys.
// The length of this list must match the length of `dense_keys`.  The
// number of elements in the Feature corresponding to dense_key[j] must
// always equal dense_shapes[j].NumEntries().  If dense_shapes[j] ==
// (D0, D1, ..., DN) then the shape of output Tensor dense_values[j]
// will be (D0, D1, ..., DN): In the case dense_shapes[j] = (-1, D1,
// ..., DN), the shape of the output Tensor dense_values[j] will be (M,
// D1, .., DN), where M is the number of blocks of elements of length
// D1 * .... * DN, in the input.
func ParseSingleExample(scope *Scope, serialized tf.Output, dense_defaults []tf.Output, num_sparse int64, sparse_keys []string, dense_keys []string, sparse_types []tf.DataType, dense_shapes []tf.Shape) (sparse_indices []tf.Output, sparse_values []tf.Output, sparse_shapes []tf.Output, dense_values []tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_sparse": num_sparse, "sparse_keys": sparse_keys, "dense_keys": dense_keys, "sparse_types": sparse_types, "dense_shapes": dense_shapes}
	opspec := tf.OpSpec{
		Type: "ParseSingleExample",
		Input: []tf.Input{
			serialized, tf.OutputList(dense_defaults),
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	if scope.Err() != nil {
		return
	}
	var idx int
	var err error
	if sparse_indices, idx, err = makeOutputList(op, idx, "sparse_indices"); err != nil {
		scope.UpdateErr("ParseSingleExample", err)
		return
	}
	if sparse_values, idx, err = makeOutputList(op, idx, "sparse_values"); err != nil {
		scope.UpdateErr("ParseSingleExample", err)
		return
	}
	if sparse_shapes, idx, err = makeOutputList(op, idx, "sparse_shapes"); err != nil {
		scope.UpdateErr("ParseSingleExample", err)
		return
	}
	if dense_values, idx, err = makeOutputList(op, idx, "dense_values"); err != nil {
		scope.UpdateErr("ParseSingleExample", err)
		return
	}
	return sparse_indices, sparse_values, sparse_shapes, dense_values
}

// ResourceScatterNdUpdateAttr is an optional argument to ResourceScatterNdUpdate.
type ResourceScatterNdUpdateAttr func(optionalAttr)

// ResourceScatterNdUpdateUseLocking sets the optional use_locking attribute to value.
//
// value: An optional bool. Defaults to True. If True, the assignment will
// be protected by a lock; otherwise the behavior is undefined,
// but may exhibit less contention.
// If not specified, defaults to true
func ResourceScatterNdUpdateUseLocking(value bool) ResourceScatterNdUpdateAttr {
	return func(m optionalAttr) {
		m["use_locking"] = value
	}
}

// Applies sparse `updates` to individual values or slices within a given
//
// variable according to `indices`.
//
// `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
//
// `indices` must be an integer tensor containing indices into `ref`.
// It must have shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
//
// The innermost dimension of `indices` (with length `K`) corresponds to
// indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
// dimension of `ref`.
//
// `updates` is a `Tensor` of rank `Q-1+P-K` with shape:
//
// ```
// [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
// ```
//
// For example, say we want to update 4 scattered elements of a rank-1 tensor
// with 8 elements. In Python, that update would look like this:
//
// ```python
//     ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
//     indices = tf.constant([[4], [3], [1], [7]])
//     updates = tf.constant([9, 10, 11, 12])
//     update = tf.scatter_nd_update(ref, indices, updates)
//     with tf.Session() as sess:
//       print(sess.run(update))
// ```
//
// The resulting update to ref would look like this:
//
//     [1, 11, 3, 10, 9, 6, 7, 12]
//
// See `tf.scatter_nd` for more details about how to make updates to
// slices.
//
// Arguments:
//	ref: A resource handle. Must be from a VarHandleOp.
//	indices: A Tensor. Must be one of the following types: int32, int64.
// A tensor of indices into ref.
//	updates: A Tensor. Must have the same type as ref. A tensor of updated
// values to add to ref.
//
// Returns the created operation.
func ResourceScatterNdUpdate(scope *Scope, ref tf.Output, indices tf.Output, updates tf.Output, optional ...ResourceScatterNdUpdateAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ResourceScatterNdUpdate",
		Input: []tf.Input{
			ref, indices, updates,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}

// EnqueueTPUEmbeddingSparseBatchAttr is an optional argument to EnqueueTPUEmbeddingSparseBatch.
type EnqueueTPUEmbeddingSparseBatchAttr func(optionalAttr)

// EnqueueTPUEmbeddingSparseBatchDeviceOrdinal sets the optional device_ordinal attribute to value.
//
// value: The TPU device to use. Should be >= 0 and less than the number
// of TPU cores in the task on which the node is placed.
// If not specified, defaults to -1
func EnqueueTPUEmbeddingSparseBatchDeviceOrdinal(value int64) EnqueueTPUEmbeddingSparseBatchAttr {
	return func(m optionalAttr) {
		m["device_ordinal"] = value
	}
}

// EnqueueTPUEmbeddingSparseBatchCombiners sets the optional combiners attribute to value.
//
// value: A list of string scalars, one for each embedding table that specify
// how to normalize the embedding activations after weighted summation.
// Supported combiners are 'mean', 'sum', or 'sqrtn'. It is invalid to have
// the sum of the weights be 0 for 'mean' or the sum of the squared weights be
// 0 for 'sqrtn'. If combiners isn't passed, the default is to use 'sum' for
// all tables.
// If not specified, defaults to <>
func EnqueueTPUEmbeddingSparseBatchCombiners(value []string) EnqueueTPUEmbeddingSparseBatchAttr {
	return func(m optionalAttr) {
		m["combiners"] = value
	}
}

// An op that enqueues TPUEmbedding input indices from a SparseTensor.
//
// This Op eases the porting of code that uses embedding_lookup_sparse(),
// although some Python preprocessing of the SparseTensor arguments to
// embedding_lookup_sparse() is required to produce the arguments to this Op,
// since only a single EnqueueTPUEmbeddingSparseBatch Op is allowed per training
// step.
//
// The tensors at corresponding positions in the three input lists
// must have the same shape, i.e. rank 1 with dim_size() equal to the total
// number of lookups into the table described by the corresponding table_id.
//
// Arguments:
//	sample_indices: A list of rank 1 Tensors specifying the training example and
// feature to which the corresponding embedding_indices and aggregation_weights
// values belong. sample_indices[i] must equal b * nf + f, where nf is the
// number of features from the corresponding table, f is in [0, nf), and
// b is in [0, batch size).
//	embedding_indices: A list of rank 1 Tensors, indices into the embedding tables.
//	aggregation_weights: A list of rank 1 Tensors containing per sample -- i.e. per
// (training example, feature) -- aggregation weights.
//	mode_override: A string input that overrides the mode specified in the
// TPUEmbeddingConfiguration. Supported values are {'unspecified', 'inference',
// 'training', 'backward_pass_only'}. When set to 'unspecified', the mode set
// in TPUEmbeddingConfiguration is used, otherwise mode_override is used.
//
// Returns the created operation.
func EnqueueTPUEmbeddingSparseBatch(scope *Scope, sample_indices []tf.Output, embedding_indices []tf.Output, aggregation_weights []tf.Output, mode_override tf.Output, optional ...EnqueueTPUEmbeddingSparseBatchAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "EnqueueTPUEmbeddingSparseBatch",
		Input: []tf.Input{
			tf.OutputList(sample_indices), tf.OutputList(embedding_indices), tf.OutputList(aggregation_weights), mode_override,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}
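
// Worked example of the sample_indices convention above (illustrative): with a
// batch size of 2 and nf = 3 features for a table, (example b, feature f) maps
// to sample_indices[i] = b*nf + f, so example 1, feature 2 corresponds to
// 1*3 + 2 = 5.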

// StatelessRandomUniformV2Attr is an optional argument to StatelessRandomUniformV2.
type StatelessRandomUniformV2Attr func(optionalAttr)

// StatelessRandomUniformV2Dtype sets the optional dtype attribute to value.
//
// value: The type of the output.
// If not specified, defaults to DT_FLOAT
func StatelessRandomUniformV2Dtype(value tf.DataType) StatelessRandomUniformV2Attr {
	return func(m optionalAttr) {
		m["dtype"] = value
	}
}

// Outputs deterministic pseudorandom values from a uniform distribution.
//
// The generated values follow a uniform distribution in the range `[0, 1)`. The
// lower bound 0 is included in the range, while the upper bound 1 is excluded.
//
// The outputs are a deterministic function of `shape`, `key`, `counter` and `alg`.
//
// Arguments:
//	shape: The shape of the output tensor.
//	key: Key for the counter-based RNG algorithm (shape uint64[1]).
//	counter: Initial counter for the counter-based RNG algorithm (shape uint64[2] or uint64[1] depending on the algorithm). If a larger vector is given, only the needed portion on the left (i.e. [:N]) will be used.
//	alg: The RNG algorithm (shape int32[]).
//
// Returns Random values with specified shape.
func StatelessRandomUniformV2(scope *Scope, shape tf.Output, key tf.Output, counter tf.Output, alg tf.Output, optional ...StatelessRandomUniformV2Attr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "StatelessRandomUniformV2",
		Input: []tf.Input{
			shape, key, counter, alg,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
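
// A typical pairing (illustrative sketch): derive `key`, `counter`, and `alg`
// from a shape-[2] seed with StatelessRandomGetKeyCounterAlg, then feed them to
// the V2 sampler.
//
// ```go
// sc := op.NewScope()
// seed := op.Const(sc, []int64{1, 2})
// key, counter, alg := op.StatelessRandomGetKeyCounterAlg(sc, seed)
// shape := op.Const(sc, []int32{8})
// u := op.StatelessRandomUniformV2(sc, shape, key, counter, alg)
// _ = u
// ```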

// This op is used as a placeholder in If branch functions. It doesn't provide a
// valid output when run, so must either be removed (e.g. replaced with a
// function input) or guaranteed not to be used (e.g. if mirroring an
// intermediate output needed for the gradient computation of the other branch).
//
// Arguments:
//	dtype: The type of the output.
//	shape: The purported shape of the output. This is only used for shape inference;
// the output will not necessarily have this shape. Can be a partial shape.
//
// Returns "Fake" output value. This should not be consumed by another op.
func FakeParam(scope *Scope, dtype tf.DataType, shape tf.Shape) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"dtype": dtype, "shape": shape}
	opspec := tf.OpSpec{
		Type: "FakeParam",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// UnicodeTranscodeAttr is an optional argument to UnicodeTranscode.
type UnicodeTranscodeAttr func(optionalAttr)

// UnicodeTranscodeErrors sets the optional errors attribute to value.
//
// value: Error handling policy when there is invalid formatting found in the input.
// The value of 'strict' will cause the operation to produce an InvalidArgument
// error on any invalid input formatting. A value of 'replace' (the default) will
// cause the operation to replace any invalid formatting in the input with the
// `replacement_char` codepoint. A value of 'ignore' will cause the operation to
// skip any invalid formatting in the input and produce no corresponding output
// character.
// If not specified, defaults to "replace"
func UnicodeTranscodeErrors(value string) UnicodeTranscodeAttr {
	return func(m optionalAttr) {
		m["errors"] = value
	}
}

// UnicodeTranscodeReplacementChar sets the optional replacement_char attribute to value.
//
// value: The replacement character codepoint to be used in place of any invalid
// formatting in the input when `errors='replace'`. Any valid unicode codepoint may
// be used. The default value is the default unicode replacement character,
// 0xFFFD (U+FFFD).
//
// Note that for UTF-8, passing a replacement character expressible in 1 byte, such
// as ' ', will preserve string alignment to the source since invalid bytes will be
// replaced with a 1-byte replacement. For UTF-16-BE and UTF-16-LE, any 1 or 2 byte
// replacement character will preserve byte alignment to the source.
// If not specified, defaults to 65533
func UnicodeTranscodeReplacementChar(value int64) UnicodeTranscodeAttr {
	return func(m optionalAttr) {
		m["replacement_char"] = value
	}
}

// UnicodeTranscodeReplaceControlCharacters sets the optional replace_control_characters attribute to value.
//
// value: Whether to replace the C0 control characters (00-1F) with the
// `replacement_char`. Default is false.
// If not specified, defaults to false
func UnicodeTranscodeReplaceControlCharacters(value bool) UnicodeTranscodeAttr {
	return func(m optionalAttr) {
		m["replace_control_characters"] = value
	}
}

// Transcode the input text from a source encoding to a destination encoding.
//
// The input is a string tensor of any shape. The output is a string tensor of
// the same shape containing the transcoded strings. Output strings are always
// valid unicode. If the input contains invalid encoding positions, the
// `errors` attribute sets the policy for how to deal with them. If the default
// error-handling policy is used, invalid formatting will be substituted in the
// output by the `replacement_char`. If the errors policy is to `ignore`, any
// invalid encoding positions in the input are skipped and not included in the
// output. If it is set to `strict` then any invalid formatting will result in an
// InvalidArgument error.
//
// This operation can be used with `output_encoding = input_encoding` to enforce
// correct formatting for inputs even if they are already in the desired encoding.
//
// If the input is prefixed by a Byte Order Mark needed to determine encoding
// (e.g. if the encoding is UTF-16 and the BOM indicates big-endian), then that
// BOM will be consumed and not emitted into the output. If the input encoding
// is marked with an explicit endianness (e.g. UTF-16-BE), then the BOM is
// interpreted as a non-breaking-space and is preserved in the output (including
// always for UTF-8).
//
// The end result is that if the input is marked as an explicit endianness the
// transcoding is faithful to all codepoints in the source. If it is not marked
// with an explicit endianness, the BOM is not considered part of the string itself
// but as metadata, and so is not preserved in the output.
//
// Examples:
//
// >>> tf.strings.unicode_transcode(["Hello", "TensorFlow", "2.x"], "UTF-8", "UTF-16-BE")
// <tf.Tensor: shape=(3,), dtype=string, numpy=
// array([b'\x00H\x00e\x00l\x00l\x00o',
//        b'\x00T\x00e\x00n\x00s\x00o\x00r\x00F\x00l\x00o\x00w',
//        b'\x002\x00.\x00x'], dtype=object)>
// >>> tf.strings.unicode_transcode(["A", "B", "C"], "US ASCII", "UTF-8").numpy()
// array([b'A', b'B', b'C'], dtype=object)
//
// Arguments:
//	input: The text to be processed. Can have any shape.
//	input_encoding: Text encoding of the input strings. This is any of the encodings supported
// by ICU ucnv algorithmic converters. Examples: `"UTF-16", "US ASCII", "UTF-8"`.
//	output_encoding: The unicode encoding to use in the output. Must be one of
// `"UTF-8", "UTF-16-BE", "UTF-32-BE"`. Multi-byte encodings will be big-endian.
//
// Returns A string tensor containing unicode text encoded using `output_encoding`.
func UnicodeTranscode(scope *Scope, input tf.Output, input_encoding string, output_encoding string, optional ...UnicodeTranscodeAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"input_encoding": input_encoding, "output_encoding": output_encoding}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "UnicodeTranscode",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Returns x // y element-wise.
//
// *NOTE*: `FloorDiv` supports broadcasting. More about broadcasting
// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
func FloorDiv(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "FloorDiv",
		Input: []tf.Input{
			x, y,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
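
// Note the floor (rather than truncation) semantics for negative operands: for
// example, 7 // 2 == 3 while -7 // 2 == -4, since results are rounded toward
// negative infinity.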

// QuantizedReluXAttr is an optional argument to QuantizedReluX.
type QuantizedReluXAttr func(optionalAttr)

// QuantizedReluXOutType sets the optional out_type attribute to value.
// If not specified, defaults to DT_QUINT8
func QuantizedReluXOutType(value tf.DataType) QuantizedReluXAttr {
	return func(m optionalAttr) {
		m["out_type"] = value
	}
}

// Computes Quantized Rectified Linear X: `min(max(features, 0), max_value)`
//
// Arguments:
//	features: The quantized input tensor.
//	max_value: A scalar float giving the upper clipping bound (the X in ReluX).
//	min_features: The float value that the lowest quantized value represents.
//	max_features: The float value that the highest quantized value represents.
//
// Returns:
//	activations: Has the same output shape as "features".
//	min_activations: The float value that the lowest quantized value represents.
//	max_activations: The float value that the highest quantized value represents.
func QuantizedReluX(scope *Scope, features tf.Output, max_value tf.Output, min_features tf.Output, max_features tf.Output, optional ...QuantizedReluXAttr) (activations tf.Output, min_activations tf.Output, max_activations tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "QuantizedReluX",
		Input: []tf.Input{
			features, max_value, min_features, max_features,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}

// PrelinearizeTupleAttr is an optional argument to PrelinearizeTuple.
type PrelinearizeTupleAttr func(optionalAttr)

// PrelinearizeTupleLayouts sets the optional layouts attribute to value.
//
// value: A vector holding the requested layout in minor-to-major sequence for all the
// tuple shapes in the order the shapes appear in the "shapes" input. The layout
// elements for a sub-shape can be set to -1 in which case the corresponding layout
// will be computed by the infeed operation.
// If not specified, defaults to <>
func PrelinearizeTupleLayouts(value []int64) PrelinearizeTupleAttr {
	return func(m optionalAttr) {
		m["layouts"] = value
	}
}

// An op which linearizes multiple Tensor values to an opaque variant tensor.
//
// Arguments:
//	inputs: A list of tensors that will be provided using the infeed mechanism.
//	shapes: The shapes of each tensor in `inputs`.
func PrelinearizeTuple(scope *Scope, inputs []tf.Output, shapes []tf.Shape, optional ...PrelinearizeTupleAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"shapes": shapes}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "PrelinearizeTuple",
		Input: []tf.Input{
			tf.OutputList(inputs),
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Forwards `data` to the output port determined by `pred`.
//
// If `pred` is true, the `data` input is forwarded to `output_true`. Otherwise,
// the data goes to `output_false`.
//
// See also `RefSwitch` and `Merge`.
//
// Arguments:
//	data: The tensor to be forwarded to the appropriate output.
//	pred: A scalar that specifies which output port will receive data.
//
// Returns:
//	output_false: If `pred` is false, data will be forwarded to this output.
//	output_true: If `pred` is true, data will be forwarded to this output.
func Switch(scope *Scope, data tf.Output, pred tf.Output) (output_false tf.Output, output_true tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Switch",
		Input: []tf.Input{
			data, pred,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1)
}
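
// A minimal sketch (illustrative): routing a tensor on a boolean predicate.
// Only the taken branch produces a value at runtime, so downstream ops should
// consume the matching output.
//
// ```go
// sc := op.NewScope()
// data := op.Const(sc, []float32{1, 2, 3})
// pred := op.Const(sc, true)
// outFalse, outTrue := op.Switch(sc, data, pred)
// // With pred == true, consume outTrue; fetching outFalse would fail at
// // runtime because that branch is dead.
// _, _ = outFalse, outTrue
// ```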

// RetrieveTPUEmbeddingFTRLParametersGradAccumDebugAttr is an optional argument to RetrieveTPUEmbeddingFTRLParametersGradAccumDebug.
type RetrieveTPUEmbeddingFTRLParametersGradAccumDebugAttr func(optionalAttr)

// RetrieveTPUEmbeddingFTRLParametersGradAccumDebugTableId sets the optional table_id attribute to value.
// If not specified, defaults to -1
func RetrieveTPUEmbeddingFTRLParametersGradAccumDebugTableId(value int64) RetrieveTPUEmbeddingFTRLParametersGradAccumDebugAttr {
	return func(m optionalAttr) {
		m["table_id"] = value
	}
}

// RetrieveTPUEmbeddingFTRLParametersGradAccumDebugTableName sets the optional table_name attribute to value.
// If not specified, defaults to ""
func RetrieveTPUEmbeddingFTRLParametersGradAccumDebugTableName(value string) RetrieveTPUEmbeddingFTRLParametersGradAccumDebugAttr {
	return func(m optionalAttr) {
		m["table_name"] = value
	}
}

// RetrieveTPUEmbeddingFTRLParametersGradAccumDebugConfig sets the optional config attribute to value.
// If not specified, defaults to ""
func RetrieveTPUEmbeddingFTRLParametersGradAccumDebugConfig(value string) RetrieveTPUEmbeddingFTRLParametersGradAccumDebugAttr {
	return func(m optionalAttr) {
		m["config"] = value
	}
}

// Retrieve FTRL embedding parameters with debug support.
//
// An op that retrieves optimization parameters from embedding to host
// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
// the correct embedding table configuration. For example, this op is
// used to retrieve updated parameters before saving a checkpoint.
//
// Returns:
//	parameters: Parameter parameters updated by the FTRL optimization algorithm.
//	accumulators: Parameter accumulators updated by the FTRL optimization algorithm.
//	linears: Parameter linears updated by the FTRL optimization algorithm.
//	gradient_accumulators: Parameter gradient_accumulators updated by the FTRL optimization algorithm.
func RetrieveTPUEmbeddingFTRLParametersGradAccumDebug(scope *Scope, num_shards int64, shard_id int64, optional ...RetrieveTPUEmbeddingFTRLParametersGradAccumDebugAttr) (parameters tf.Output, accumulators tf.Output, linears tf.Output, gradient_accumulators tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "RetrieveTPUEmbeddingFTRLParametersGradAccumDebug",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2), op.Output(3)
}

// UnicodeEncodeAttr is an optional argument to UnicodeEncode.
type UnicodeEncodeAttr func(optionalAttr)

// UnicodeEncodeErrors sets the optional errors attribute to value.
//
// value: Error handling policy when there is invalid formatting found in the input.
// The value of 'strict' will cause the operation to produce an InvalidArgument
// error on any invalid input formatting. A value of 'replace' (the default) will
// cause the operation to replace any invalid formatting in the input with the
// `replacement_char` codepoint. A value of 'ignore' will cause the operation to
// skip any invalid formatting in the input and produce no corresponding output
// character.
// If not specified, defaults to "replace"
func UnicodeEncodeErrors(value string) UnicodeEncodeAttr {
	return func(m optionalAttr) {
		m["errors"] = value
	}
}

// UnicodeEncodeReplacementChar sets the optional replacement_char attribute to value.
//
// value: The replacement character codepoint to be used in place of any invalid
// formatting in the input when `errors='replace'`. Any valid unicode codepoint may
// be used. The default is the Unicode replacement character U+FFFD (decimal 65533).
// If not specified, defaults to 65533
func UnicodeEncodeReplacementChar(value int64) UnicodeEncodeAttr {
	return func(m optionalAttr) {
		m["replacement_char"] = value
	}
}

// Encode a tensor of ints into unicode strings.
//
// Returns a vector of strings, where `output[i]` is constructed by encoding the
// Unicode codepoints in `input_values[input_splits[i]:input_splits[i+1]]`
// using `output_encoding`.
//
// ---
//
// Example:
//
// ```
// input_values = [72, 101, 108, 108, 111, 87, 111, 114, 108, 100]
// input_splits = [0, 5, 10]
// output_encoding = 'UTF-8'
//
// output = ['Hello', 'World']
// ```
//
// Arguments:
//	input_values: A 1D tensor containing the unicode codepoints that should be encoded.
//	input_splits: A 1D tensor specifying how the unicode codepoints should be split into strings.
// In particular, `output[i]` is constructed by encoding the codepoints in the
// slice `input_values[input_splits[i]:input_splits[i+1]]`.
//	output_encoding: Unicode encoding of the output strings. Valid encodings are: `"UTF-8",
// "UTF-16-BE", and "UTF-32-BE"`.
//
// Returns The 1-D Tensor of strings encoded from the provided unicode codepoints.
func UnicodeEncode(scope *Scope, input_values tf.Output, input_splits tf.Output, output_encoding string, optional ...UnicodeEncodeAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"output_encoding": output_encoding}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "UnicodeEncode",
		Input: []tf.Input{
			input_values, input_splits,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
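
// An illustrative sketch (not machine generated) mirroring the example above:
// it assumes NewScope and Const from this package and encodes two strings,
// "Hello" and "World", from one flat codepoint vector plus a splits vector.
func exampleUnicodeEncode() {
	s := NewScope()
	values := Const(s, []int32{72, 101, 108, 108, 111, 87, 111, 114, 108, 100})
	splits := Const(s, []int64{0, 5, 10}) // output[i] covers values[splits[i]:splits[i+1]]
	out := UnicodeEncode(s, values, splits, "UTF-8", UnicodeEncodeErrors("replace"))
	_ = out // a 1-D string tensor: ["Hello", "World"]
}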

// ResourceSparseApplyAdagradAttr is an optional argument to ResourceSparseApplyAdagrad.
type ResourceSparseApplyAdagradAttr func(optionalAttr)

// ResourceSparseApplyAdagradUseLocking sets the optional use_locking attribute to value.
//
// value: If `True`, updating of the var and accum tensors will be protected
// by a lock; otherwise the behavior is undefined, but may exhibit less
// contention.
// If not specified, defaults to false
func ResourceSparseApplyAdagradUseLocking(value bool) ResourceSparseApplyAdagradAttr {
	return func(m optionalAttr) {
		m["use_locking"] = value
	}
}

// ResourceSparseApplyAdagradUpdateSlots sets the optional update_slots attribute to value.
// If not specified, defaults to true
func ResourceSparseApplyAdagradUpdateSlots(value bool) ResourceSparseApplyAdagradAttr {
	return func(m optionalAttr) {
		m["update_slots"] = value
	}
}

// Update relevant entries in '*var' and '*accum' according to the adagrad scheme.
//
// That is, for the rows for which we have grad, var and accum are updated as follows:
// accum += grad * grad
// var -= lr * grad * (1 / sqrt(accum))
//
// Arguments:
//	var_: Should be from a Variable().
//	accum: Should be from a Variable().
//	lr: Learning rate. Must be a scalar.
//	grad: The gradient.
//	indices: A vector of indices into the first dimension of var and accum.
//
// Returns the created operation.
func ResourceSparseApplyAdagrad(scope *Scope, var_ tf.Output, accum tf.Output, lr tf.Output, grad tf.Output, indices tf.Output, optional ...ResourceSparseApplyAdagradAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ResourceSparseApplyAdagrad",
		Input: []tf.Input{
			var_, accum, lr, grad, indices,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}
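
// A minimal construction sketch (illustrative; it assumes NewScope, Const, and
// VarHandleOp from this package): a float variable and its accumulator are
// declared as resource handles, and a sparse adagrad step is applied to rows
// 0 and 3 only. Variable initialization and session execution are omitted.
func exampleSparseAdagrad() {
	s := NewScope()
	v := VarHandleOp(s, tf.Float, tf.MakeShape(4))
	accum := VarHandleOp(s, tf.Float, tf.MakeShape(4))
	lr := Const(s, float32(0.1))
	grad := Const(s, []float32{0.5, -0.5}) // one gradient row per index below
	indices := Const(s, []int32{0, 3})
	step := ResourceSparseApplyAdagrad(s, v, accum, lr, grad, indices)
	_ = step
}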

// OutfeedDequeueTupleAttr is an optional argument to OutfeedDequeueTuple.
type OutfeedDequeueTupleAttr func(optionalAttr)

// OutfeedDequeueTupleDeviceOrdinal sets the optional device_ordinal attribute to value.
//
// value: The TPU device to use. This should be -1 when the Op
// is running on a TPU device, and >= 0 when the Op is running on the CPU
// device.
// If not specified, defaults to -1
func OutfeedDequeueTupleDeviceOrdinal(value int64) OutfeedDequeueTupleAttr {
	return func(m optionalAttr) {
		m["device_ordinal"] = value
	}
}

// Retrieve multiple values from the computation outfeed.
//
// This operation will block indefinitely until data is available. Output `i`
// corresponds to XLA tuple element `i`.
//
// Arguments:
//	dtypes: The element types of each element in `outputs`.
//	shapes: The shapes of each tensor in `outputs`.
//
// Returns A list of tensors that will be read from the outfeed.
func OutfeedDequeueTuple(scope *Scope, dtypes []tf.DataType, shapes []tf.Shape, optional ...OutfeedDequeueTupleAttr) (outputs []tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"dtypes": dtypes, "shapes": shapes}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "OutfeedDequeueTuple",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	if scope.Err() != nil {
		return
	}
	var idx int
	var err error
	if outputs, idx, err = makeOutputList(op, idx, "outputs"); err != nil {
		scope.UpdateErr("OutfeedDequeueTuple", err)
		return
	}
	return outputs
}

// Deserialize bucket boundaries and ready flag into current QuantileAccumulator.
//
// An op that deserializes bucket boundaries and the boundaries-ready flag into the current QuantileAccumulator.
//
// Arguments:
//	quantile_stream_resource_handle: resource handle referring to a QuantileStreamResource.
//	bucket_boundaries: float; List of Rank 1 Tensors each containing the bucket boundaries for a feature.
//
// Returns the created operation.
func BoostedTreesQuantileStreamResourceDeserialize(scope *Scope, quantile_stream_resource_handle tf.Output, bucket_boundaries []tf.Output) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "BoostedTreesQuantileStreamResourceDeserialize",
		Input: []tf.Input{
			quantile_stream_resource_handle, tf.OutputList(bucket_boundaries),
		},
	}
	return scope.AddOperation(opspec)
}

// ResourceApplyAdadeltaAttr is an optional argument to ResourceApplyAdadelta.
type ResourceApplyAdadeltaAttr func(optionalAttr)

// ResourceApplyAdadeltaUseLocking sets the optional use_locking attribute to value.
//
// value: If True, updating of the var, accum and update_accum tensors will be protected by
// a lock; otherwise the behavior is undefined, but may exhibit less contention.
// If not specified, defaults to false
func ResourceApplyAdadeltaUseLocking(value bool) ResourceApplyAdadeltaAttr {
	return func(m optionalAttr) {
		m["use_locking"] = value
	}
}

// Update '*var' according to the adadelta scheme.
//
// accum = rho * accum + (1 - rho) * grad^2
// update = sqrt(update_accum + epsilon) / sqrt(accum + epsilon) * grad
// update_accum = rho * update_accum + (1 - rho) * update^2
// var -= update
//
// Arguments:
//	var_: Should be from a Variable().
//	accum: Should be from a Variable().
//	accum_update: Should be from a Variable().
//	lr: Scaling factor. Must be a scalar.
//	rho: Decay factor. Must be a scalar.
//	epsilon: Constant factor. Must be a scalar.
//	grad: The gradient.
//
// Returns the created operation.
func ResourceApplyAdadelta(scope *Scope, var_ tf.Output, accum tf.Output, accum_update tf.Output, lr tf.Output, rho tf.Output, epsilon tf.Output, grad tf.Output, optional ...ResourceApplyAdadeltaAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ResourceApplyAdadelta",
		Input: []tf.Input{
			var_, accum, accum_update, lr, rho, epsilon, grad,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}

// Converts each string in the input Tensor to its hash modulo the number of buckets.
//
// The hash function is deterministic on the content of the string within the
// process and will never change. However, it is not suitable for cryptography.
// This function may be used when CPU time is scarce and inputs are trusted or
// unimportant. There is a risk of adversaries constructing inputs that all hash
// to the same bucket. To prevent this problem, use a strong hash function with
// `tf.string_to_hash_bucket_strong`.
//
// Examples:
//
// >>> tf.strings.to_hash_bucket_fast(["Hello", "TensorFlow", "2.x"], 3).numpy()
// array([0, 2, 2])
//
// Arguments:
//	input: The strings to assign a hash bucket.
//	num_buckets: The number of buckets.
//
// Returns A Tensor of the same shape as the input `string_tensor`.
func StringToHashBucketFast(scope *Scope, input tf.Output, num_buckets int64) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_buckets": num_buckets}
	opspec := tf.OpSpec{
		Type: "StringToHashBucketFast",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
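
// An illustrative sketch (it assumes NewScope and Const from this package)
// reproducing the example above: three strings are hashed into 3 buckets.
// The assignment is deterministic for a given string but not cryptographically
// strong; see tf.string_to_hash_bucket_strong for adversarial settings.
func exampleStringToHashBucketFast() {
	s := NewScope()
	in := Const(s, []string{"Hello", "TensorFlow", "2.x"})
	buckets := StringToHashBucketFast(s, in, 3) // expected values: [0, 2, 2]
	_ = buckets
}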

// StringJoinAttr is an optional argument to StringJoin.
type StringJoinAttr func(optionalAttr)

// StringJoinSeparator sets the optional separator attribute to value.
//
// value: string, an optional join separator.
// If not specified, defaults to ""
func StringJoinSeparator(value string) StringJoinAttr {
	return func(m optionalAttr) {
		m["separator"] = value
	}
}

// Joins the strings in the given list of string tensors into one tensor.
//
// The strings are joined with the given separator (default is an empty separator).
//
// Examples:
//
// >>> s = ["hello", "world", "tensorflow"]
// >>> tf.strings.join(s, " ")
// <tf.Tensor: shape=(), dtype=string, numpy=b'hello world tensorflow'>
//
// Arguments:
//	inputs: A list of string tensors.  The tensors must all have the same shape,
// or be scalars.  Scalars may be mixed in; these will be broadcast to the shape
// of non-scalar inputs.
func StringJoin(scope *Scope, inputs []tf.Output, optional ...StringJoinAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "StringJoin",
		Input: []tf.Input{
			tf.OutputList(inputs),
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
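
// An illustrative sketch (it assumes NewScope and Const from this package)
// matching the example above: three scalar strings joined with a space.
func exampleStringJoin() {
	s := NewScope()
	parts := []tf.Output{Const(s, "hello"), Const(s, "world"), Const(s, "tensorflow")}
	joined := StringJoin(s, parts, StringJoinSeparator(" "))
	_ = joined // scalar string: "hello world tensorflow"
}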

// Replaces the contents of the table with the specified keys and values.
//
// The tensor `keys` must be of the same type as the keys of the table.
// The tensor `values` must be of the type of the table values.
//
// Arguments:
//	table_handle: Handle to the table.
//	keys: Any shape.  Keys to look up.
//	values: Values to associate with keys.
//
// Returns the created operation.
func LookupTableImportV2(scope *Scope, table_handle tf.Output, keys tf.Output, values tf.Output) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "LookupTableImportV2",
		Input: []tf.Input{
			table_handle, keys, values,
		},
	}
	return scope.AddOperation(opspec)
}

// ImageProjectiveTransformV3Attr is an optional argument to ImageProjectiveTransformV3.
type ImageProjectiveTransformV3Attr func(optionalAttr)

// ImageProjectiveTransformV3FillMode sets the optional fill_mode attribute to value.
//
// value: Fill mode, "REFLECT", "WRAP", "CONSTANT", or "NEAREST".
// If not specified, defaults to "CONSTANT"
func ImageProjectiveTransformV3FillMode(value string) ImageProjectiveTransformV3Attr {
	return func(m optionalAttr) {
		m["fill_mode"] = value
	}
}

// Applies the given transform to each of the images.
//
// If one row of `transforms` is `[a0, a1, a2, b0, b1, b2, c0, c1]`, then it maps
// the *output* point `(x, y)` to a transformed *input* point
// `(x', y') = ((a0 x + a1 y + a2) / k, (b0 x + b1 y + b2) / k)`, where
// `k = c0 x + c1 y + 1`. If the transformed point lies outside of the input
// image, the output pixel is set to fill_value.
//
// Arguments:
//	images: 4-D with shape `[batch, height, width, channels]`.
//	transforms: 2-D Tensor, `[batch, 8]` or `[1, 8]` matrix, where each row corresponds to a 3 x 3
// projective transformation matrix, with the last entry assumed to be 1. If there
// is one row, the same transformation will be applied to all images.
//	output_shape: 1-D Tensor [new_height, new_width].
//	fill_value: float, the value to be filled when fill_mode is "CONSTANT".
//	interpolation: Interpolation method, "NEAREST" or "BILINEAR".
//
// Returns 4-D with shape
// `[batch, new_height, new_width, channels]`.
func ImageProjectiveTransformV3(scope *Scope, images tf.Output, transforms tf.Output, output_shape tf.Output, fill_value tf.Output, interpolation string, optional ...ImageProjectiveTransformV3Attr) (transformed_images tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"interpolation": interpolation}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ImageProjectiveTransformV3",
		Input: []tf.Input{
			images, transforms, output_shape, fill_value,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
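
// An illustrative sketch (it assumes NewScope and Const from this package): a
// single transform row [a0 a1 a2 b0 b1 b2 c0 c1] = [1 0 1 0 1 2 0 0] maps each
// output point (x, y) to the input point (x + 1, y + 2), a pure translation;
// output points whose preimage falls outside the image take fill_value.
func exampleImageProjectiveTransformV3() {
	s := NewScope()
	images := Const(s, [][][][]float32{{
		{{1}, {2}},
		{{3}, {4}},
	}}) // shape [1, 2, 2, 1]
	transforms := Const(s, [][]float32{{1, 0, 1, 0, 1, 2, 0, 0}})
	outputShape := Const(s, []int32{2, 2})
	fillValue := Const(s, float32(0))
	out := ImageProjectiveTransformV3(s, images, transforms, outputShape, fillValue,
		"NEAREST", ImageProjectiveTransformV3FillMode("CONSTANT"))
	_ = out
}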

// LoadTPUEmbeddingMomentumParametersAttr is an optional argument to LoadTPUEmbeddingMomentumParameters.
type LoadTPUEmbeddingMomentumParametersAttr func(optionalAttr)

// LoadTPUEmbeddingMomentumParametersTableId sets the optional table_id attribute to value.
// If not specified, defaults to -1
func LoadTPUEmbeddingMomentumParametersTableId(value int64) LoadTPUEmbeddingMomentumParametersAttr {
	return func(m optionalAttr) {
		m["table_id"] = value
	}
}

// LoadTPUEmbeddingMomentumParametersTableName sets the optional table_name attribute to value.
// If not specified, defaults to ""
func LoadTPUEmbeddingMomentumParametersTableName(value string) LoadTPUEmbeddingMomentumParametersAttr {
	return func(m optionalAttr) {
		m["table_name"] = value
	}
}

// LoadTPUEmbeddingMomentumParametersConfig sets the optional config attribute to value.
// If not specified, defaults to ""
func LoadTPUEmbeddingMomentumParametersConfig(value string) LoadTPUEmbeddingMomentumParametersAttr {
	return func(m optionalAttr) {
		m["config"] = value
	}
}

// Load Momentum embedding parameters.
//
// An op that loads optimization parameters into HBM for embedding. Must be
// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
// embedding table configuration. For example, this op is used to install
// parameters that are loaded from a checkpoint before a training loop is
// executed.
//
// Arguments:
//	parameters: Value of parameters used in the Momentum optimization algorithm.
//	momenta: Value of momenta used in the Momentum optimization algorithm.
//
//
//
// Returns the created operation.
func LoadTPUEmbeddingMomentumParameters(scope *Scope, parameters tf.Output, momenta tf.Output, num_shards int64, shard_id int64, optional ...LoadTPUEmbeddingMomentumParametersAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "LoadTPUEmbeddingMomentumParameters",
		Input: []tf.Input{
			parameters, momenta,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}

// SkipgramAttr is an optional argument to Skipgram.
type SkipgramAttr func(optionalAttr)

// SkipgramWindowSize sets the optional window_size attribute to value.
//
// value: The number of words to predict to the left and right of the target.
// If not specified, defaults to 5
func SkipgramWindowSize(value int64) SkipgramAttr {
	return func(m optionalAttr) {
		m["window_size"] = value
	}
}

// SkipgramMinCount sets the optional min_count attribute to value.
//
// value: The minimum number of word occurrences for it to be included in the
// vocabulary.
// If not specified, defaults to 5
func SkipgramMinCount(value int64) SkipgramAttr {
	return func(m optionalAttr) {
		m["min_count"] = value
	}
}

// SkipgramSubsample sets the optional subsample attribute to value.
//
// value: Threshold for word occurrence. Words that appear with higher
// frequency will be randomly down-sampled. Set to 0 to disable.
// If not specified, defaults to 0.001
func SkipgramSubsample(value float32) SkipgramAttr {
	return func(m optionalAttr) {
		m["subsample"] = value
	}
}

// Parses a text file and creates a batch of examples.
//
// DEPRECATED at GraphDef version 19: Moving word2vec into tensorflow_models/tutorials and deprecating its ops here as a result
//
// Arguments:
//	filename: The corpus's text file name.
//	batch_size: The size of produced batch.
//
// Returns:
//	vocab_word: A vector of words in the corpus.
//	vocab_freq: Frequencies of words, sorted in non-ascending order.
//	words_per_epoch: Number of words per epoch in the data file.
//	current_epoch: The current epoch number.
//	total_words_processed: The total number of words processed so far.
//	examples: A vector of word ids.
//	labels: A vector of word ids.
func Skipgram(scope *Scope, filename string, batch_size int64, optional ...SkipgramAttr) (vocab_word tf.Output, vocab_freq tf.Output, words_per_epoch tf.Output, current_epoch tf.Output, total_words_processed tf.Output, examples tf.Output, labels tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"filename": filename, "batch_size": batch_size}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "Skipgram",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2), op.Output(3), op.Output(4), op.Output(5), op.Output(6)
}

// StaticRegexReplaceAttr is an optional argument to StaticRegexReplace.
type StaticRegexReplaceAttr func(optionalAttr)

// StaticRegexReplaceReplaceGlobal sets the optional replace_global attribute to value.
//
// value: If True, the replacement is global, otherwise the replacement
// is done only on the first match.
// If not specified, defaults to true
func StaticRegexReplaceReplaceGlobal(value bool) StaticRegexReplaceAttr {
	return func(m optionalAttr) {
		m["replace_global"] = value
	}
}

// Replaces the match of pattern in input with rewrite.
//
// It follows the re2 syntax (https://github.com/google/re2/wiki/Syntax)
//
// Arguments:
//	input: The text to be processed.
//	pattern: The regular expression to match the input.
//	rewrite: The rewrite to be applied to the matched expression.
//
// Returns The text after applying pattern and rewrite.
func StaticRegexReplace(scope *Scope, input tf.Output, pattern string, rewrite string, optional ...StaticRegexReplaceAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"pattern": pattern, "rewrite": rewrite}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "StaticRegexReplace",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Returns which elements of x are finite.
//
// @compatibility(numpy)
// Equivalent to np.isfinite
// @end_compatibility
//
// Example:
//
// ```python
// x = tf.constant([5.0, 4.8, 6.8, np.inf, np.nan])
// tf.math.is_finite(x) ==> [True, True, True, False, False]
// ```
func IsFinite(scope *Scope, x tf.Output) (y tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "IsFinite",
		Input: []tf.Input{
			x,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Returns a tensor of zeros with the same shape and type as x.
//
// Arguments:
//	x: a tensor of type T.
//
// Returns a tensor of the same shape and type as x but filled with zeros.
func ZerosLike(scope *Scope, x tf.Output) (y tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "ZerosLike",
		Input: []tf.Input{
			x,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// RetrieveTPUEmbeddingAdadeltaParametersAttr is an optional argument to RetrieveTPUEmbeddingAdadeltaParameters.
type RetrieveTPUEmbeddingAdadeltaParametersAttr func(optionalAttr)

// RetrieveTPUEmbeddingAdadeltaParametersTableId sets the optional table_id attribute to value.
// If not specified, defaults to -1
func RetrieveTPUEmbeddingAdadeltaParametersTableId(value int64) RetrieveTPUEmbeddingAdadeltaParametersAttr {
	return func(m optionalAttr) {
		m["table_id"] = value
	}
}

// RetrieveTPUEmbeddingAdadeltaParametersTableName sets the optional table_name attribute to value.
// If not specified, defaults to ""
func RetrieveTPUEmbeddingAdadeltaParametersTableName(value string) RetrieveTPUEmbeddingAdadeltaParametersAttr {
	return func(m optionalAttr) {
		m["table_name"] = value
	}
}

// RetrieveTPUEmbeddingAdadeltaParametersConfig sets the optional config attribute to value.
// If not specified, defaults to ""
func RetrieveTPUEmbeddingAdadeltaParametersConfig(value string) RetrieveTPUEmbeddingAdadeltaParametersAttr {
	return func(m optionalAttr) {
		m["config"] = value
	}
}

// Retrieve Adadelta embedding parameters.
//
// An op that retrieves optimization parameters from embedding to host
// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
// the correct embedding table configuration. For example, this op is
// used to retrieve updated parameters before saving a checkpoint.
//
// Returns:
//	parameters: Parameter parameters updated by the Adadelta optimization algorithm.
//	accumulators: Parameter accumulators updated by the Adadelta optimization algorithm.
//	updates: Parameter updates updated by the Adadelta optimization algorithm.
func RetrieveTPUEmbeddingAdadeltaParameters(scope *Scope, num_shards int64, shard_id int64, optional ...RetrieveTPUEmbeddingAdadeltaParametersAttr) (parameters tf.Output, accumulators tf.Output, updates tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "RetrieveTPUEmbeddingAdadeltaParameters",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}

// RegexReplaceAttr is an optional argument to RegexReplace.
type RegexReplaceAttr func(optionalAttr)

// RegexReplaceReplaceGlobal sets the optional replace_global attribute to value.
//
// value: If True, the replacement is global (that is, all matches of the `pattern` regular
// expression in each input string are rewritten), otherwise the `rewrite`
// substitution is only made for the first `pattern` match.
// If not specified, defaults to true
func RegexReplaceReplaceGlobal(value bool) RegexReplaceAttr {
	return func(m optionalAttr) {
		m["replace_global"] = value
	}
}

// Replaces matches of the `pattern` regular expression in `input` with the
// replacement string provided in `rewrite`.
//
// It follows the re2 syntax (https://github.com/google/re2/wiki/Syntax)
//
// Arguments:
//	input: The text to be processed.
//	pattern: The regular expression to be matched in the `input` strings.
//	rewrite: The rewrite string to be substituted for the `pattern` expression where it is
// matched in the `input` strings.
//
// Returns The text after applying pattern match and rewrite substitution.
func RegexReplace(scope *Scope, input tf.Output, pattern tf.Output, rewrite tf.Output, optional ...RegexReplaceAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "RegexReplace",
		Input: []tf.Input{
			input, pattern, rewrite,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
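
// An illustrative sketch (it assumes NewScope and Const from this package):
// the pattern, rewrite, and input are all tensors here, so the regex itself
// could be fed at run time. "b+" is replaced globally with "B" in each string.
func exampleRegexReplace() {
	s := NewScope()
	input := Const(s, []string{"abbbc", "bb"})
	pattern := Const(s, "b+")
	rewrite := Const(s, "B")
	out := RegexReplace(s, input, pattern, rewrite, RegexReplaceReplaceGlobal(true))
	_ = out // ["aBc", "B"]
}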

// Concatenates tensors along one dimension.
//
// Arguments:
//	concat_dim: 0-D.  The dimension along which to concatenate.  Must be in the
// range [0, rank(values)).
//	values: The `N` Tensors to concatenate. Their ranks and types must match,
// and their sizes must match in all dimensions except `concat_dim`.
//
// Returns A `Tensor` with the concatenation of values stacked along the
// `concat_dim` dimension.  This tensor's shape matches that of `values` except
// in `concat_dim` where it has the sum of the sizes.
func Concat(scope *Scope, concat_dim tf.Output, values []tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Concat",
		Input: []tf.Input{
			concat_dim, tf.OutputList(values),
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
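
// An illustrative sketch (it assumes NewScope and Const from this package):
// two 1x2 matrices concatenated along dimension 0 yield a 2x2 result.
func exampleConcat() {
	s := NewScope()
	a := Const(s, [][]float32{{1, 2}})
	b := Const(s, [][]float32{{3, 4}})
	dim := Const(s, int32(0)) // concat_dim must be a 0-D tensor
	out := Concat(s, dim, []tf.Output{a, b}) // shape [2, 2]
	_ = out
}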

// ResourceApplyPowerSignAttr is an optional argument to ResourceApplyPowerSign.
type ResourceApplyPowerSignAttr func(optionalAttr)

// ResourceApplyPowerSignUseLocking sets the optional use_locking attribute to value.
//
// value: If `True`, updating of the var and m tensors is
// protected by a lock; otherwise the behavior is undefined, but may exhibit less
// contention.
// If not specified, defaults to false
func ResourceApplyPowerSignUseLocking(value bool) ResourceApplyPowerSignAttr {
	return func(m optionalAttr) {
		m["use_locking"] = value
	}
}

// Update '*var' according to the PowerSign update.
//
// m_t <- beta1 * m_{t-1} + (1 - beta1) * g
// update <- exp(logbase * sign_decay * sign(g) * sign(m_t)) * g
// variable <- variable - lr_t * update
//
// Arguments:
//	var_: Should be from a Variable().
//	m: Should be from a Variable().
//	lr: Scaling factor. Must be a scalar.
//	logbase: Must be a scalar.
//	sign_decay: Must be a scalar.
//	beta: Must be a scalar.
//	grad: The gradient.
//
// Returns the created operation.
func ResourceApplyPowerSign(scope *Scope, var_ tf.Output, m tf.Output, lr tf.Output, logbase tf.Output, sign_decay tf.Output, beta tf.Output, grad tf.Output, optional ...ResourceApplyPowerSignAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ResourceApplyPowerSign",
		Input: []tf.Input{
			var_, m, lr, logbase, sign_decay, beta, grad,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}

// Converts a tensor to a scalar predicate.
//
// Converts a tensor to a scalar predicate with the following rules:
//
// - For 0D tensors, truthiness is determined by comparing against a "zero"
//   value. For numerical types it is the obvious zero. For strings it is the
//   empty string.
//
// - For >0D tensors, truthiness is determined by looking at the number of
//   elements. If it has zero elements, then the result is false. Otherwise the
//   result is true.
//
// This matches the behavior of If and While for determining if a tensor counts
// as true/false for a branch condition.
func ToBool(scope *Scope, input tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "ToBool",
		Input: []tf.Input{
			input,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
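
// An illustrative sketch (it assumes NewScope and Const from this package) of
// the two rules above: a 0-D zero is false by value, while a 1-element vector
// is true because truthiness of >0D tensors depends only on the element count.
func exampleToBool() {
	s := NewScope()
	scalar := ToBool(s, Const(s, int32(0)))   // false: scalar compares equal to zero
	vector := ToBool(s, Const(s, []int32{0})) // true: one element, value irrelevant
	_, _ = scalar, vector
}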

// GenerateBoundingBoxProposalsAttr is an optional argument to GenerateBoundingBoxProposals.
type GenerateBoundingBoxProposalsAttr func(optionalAttr)

// GenerateBoundingBoxProposalsPostNmsTopn sets the optional post_nms_topn attribute to value.
//
// value: An integer. Maximum number of rois in the output.
// If not specified, defaults to 300
func GenerateBoundingBoxProposalsPostNmsTopn(value int64) GenerateBoundingBoxProposalsAttr {
	return func(m optionalAttr) {
		m["post_nms_topn"] = value
	}
}

// This op produces Region of Interests from given bounding boxes (bbox_deltas) encoded wrt anchors according to eq. 2 in arXiv:1506.01497
//
//       The op selects top `pre_nms_topn` scoring boxes, decodes them with respect to anchors,
//       applies non-maximal suppression on overlapping boxes with higher than
//       `nms_threshold` intersection-over-union (iou) value, discarding boxes whose shorter
//       side is less than `min_size`.
//       Inputs:
//       `scores`: A 4D tensor of shape [Batch, Height, Width, Num Anchors] containing the scores per anchor at given position
//       `bbox_deltas`: A tensor of shape [Batch, Height, Width, 4 x Num Anchors] of boxes encoded to each anchor
//       `anchors`: A 1D tensor of shape [4 x Num Anchors], representing the anchors.
//       Outputs:
//       `rois`: output RoIs, a 3D tensor of shape [Batch, post_nms_topn, 4], padded by 0 if less than post_nms_topn candidates found.
//       `roi_probabilities`: probability scores of each roi in 'rois', a 2D tensor of shape [Batch, post_nms_topn], padded with 0 if needed, sorted by scores.
//
// Arguments:
//	scores: A 4-D float tensor of shape `[num_images, height, width, num_anchors]` containing scores of the boxes for given anchors, can be unsorted.
//	bbox_deltas: A 4-D float tensor of shape `[num_images, height, width, 4 x num_anchors]`, encoding boxes with respect to each anchor.
// Coordinates are given in the form [dy, dx, dh, dw].
//	image_info: A 2-D float tensor of shape `[num_images, 5]` containing image information (height, width, scale).
//	anchors: A 2-D float tensor of shape `[num_anchors, 4]` describing the anchor boxes. Boxes are formatted in the form [y1, x1, y2, x2].
//	nms_threshold: A scalar float tensor for non-maximal-suppression threshold.
//	pre_nms_topn: A scalar int tensor for the number of top scoring boxes to be used as input.
//	min_size: A scalar float tensor. Any box that has a smaller size than min_size will be discarded.
//
// Returns:
//	rois: A 3-D float tensor of shape `[num_images, post_nms_topn, 4]` representing the selected
// region of interest boxes. Sorted in descending order of score.
//	roi_probabilities: A 2-D float tensor of shape `[num_images, post_nms_topn]` representing the score of the
// region of interest box in the `rois` tensor at the same index.
func GenerateBoundingBoxProposals(scope *Scope, scores tf.Output, bbox_deltas tf.Output, image_info tf.Output, anchors tf.Output, nms_threshold tf.Output, pre_nms_topn tf.Output, min_size tf.Output, optional ...GenerateBoundingBoxProposalsAttr) (rois tf.Output, roi_probabilities tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "GenerateBoundingBoxProposals",
		Input: []tf.Input{
			scores, bbox_deltas, image_info, anchors, nms_threshold, pre_nms_topn, min_size,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1)
}

// InitializeTableFromTextFileV2Attr is an optional argument to InitializeTableFromTextFileV2.
type InitializeTableFromTextFileV2Attr func(optionalAttr)

// InitializeTableFromTextFileV2VocabSize sets the optional vocab_size attribute to value.
//
// value: Number of elements of the file, use -1 if unknown.
// If not specified, defaults to -1
//
// REQUIRES: value >= -1
func InitializeTableFromTextFileV2VocabSize(value int64) InitializeTableFromTextFileV2Attr {
	return func(m optionalAttr) {
		m["vocab_size"] = value
	}
}

// InitializeTableFromTextFileV2Delimiter sets the optional delimiter attribute to value.
//
// value: Delimiter to separate fields in a line.
// If not specified, defaults to "\t"
func InitializeTableFromTextFileV2Delimiter(value string) InitializeTableFromTextFileV2Attr {
	return func(m optionalAttr) {
		m["delimiter"] = value
	}
}

// InitializeTableFromTextFileV2Offset sets the optional offset attribute to value.
// If not specified, defaults to 0
func InitializeTableFromTextFileV2Offset(value int64) InitializeTableFromTextFileV2Attr {
	return func(m optionalAttr) {
		m["offset"] = value
	}
}

// Initializes a table from a text file.
//
// It inserts one key-value pair into the table for each line of the file.
// The key and value are extracted from the whole line content, from elements of
// the line split on `delimiter`, or from the line number (starting from zero).
// Where to extract the key and value from a line is specified by `key_index` and
// `value_index`.
//
// - A value of -1 means use the line number (starting from zero); expects `int64`.
// - A value of -2 means use the whole line content; expects `string`.
// - A value >= 0 means use the index (starting at zero) of the split line based
//   on `delimiter`.
//
// Arguments:
//	table_handle: Handle to a table which will be initialized.
//	filename: Filename of a vocabulary text file.
//	key_index: Column index in a line to get the table `key` values from.
//	value_index: Column index that represents information of a line to get the table
// `value` values from.
//
// Returns the created operation.
func InitializeTableFromTextFileV2(scope *Scope, table_handle tf.Output, filename tf.Output, key_index int64, value_index int64, optional ...InitializeTableFromTextFileV2Attr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"key_index": key_index, "value_index": value_index}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "InitializeTableFromTextFileV2",
		Input: []tf.Input{
			table_handle, filename,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}
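
// An illustrative sketch (it assumes NewScope, Const, and HashTableV2 from
// this package; the vocabulary path is hypothetical): each whole line of the
// file becomes a string key (key_index = -2) mapped to its zero-based line
// number (value_index = -1, an int64 value).
func exampleInitializeTableFromTextFile() {
	s := NewScope()
	table := HashTableV2(s, tf.String, tf.Int64)
	filename := Const(s, "/tmp/vocab.txt") // hypothetical vocabulary file
	initOp := InitializeTableFromTextFileV2(s, table, filename, -2, -1,
		InitializeTableFromTextFileV2Delimiter("\t"))
	_ = initOp
}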

// ResourceScatterNdSubAttr is an optional argument to ResourceScatterNdSub.
type ResourceScatterNdSubAttr func(optionalAttr)

// ResourceScatterNdSubUseLocking sets the optional use_locking attribute to value.
//
// value: An optional bool. Defaults to True. If True, the assignment will
// be protected by a lock; otherwise the behavior is undefined,
// but may exhibit less contention.
// If not specified, defaults to true
func ResourceScatterNdSubUseLocking(value bool) ResourceScatterNdSubAttr {
	return func(m optionalAttr) {
		m["use_locking"] = value
	}
}

// Applies sparse subtraction to individual values or slices in a Variable.
//
// `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
//
// `indices` must be an integer tensor, containing indices into `ref`.
// It must be of shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
//
// The innermost dimension of `indices` (with length `K`) corresponds to
// indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
// dimension of `ref`.
//
// `updates` is a `Tensor` of rank `Q-1+P-K` with shape:
//
// ```
// [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]
// ```
//
// For example, say we want to subtract 4 scattered elements from a rank-1 tensor
// with 8 elements. In Python, that subtraction would look like this:
//
// ```python
// ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8], use_resource=True)
// indices = tf.constant([[4], [3], [1], [7]])
// updates = tf.constant([9, 10, 11, 12])
// sub = tf.scatter_nd_sub(ref, indices, updates)
// with tf.Session() as sess:
//   print(sess.run(sub))
// ```
//
// The resulting update to ref would look like this:
//
//     [1, -9, 3, -6, -4, 6, 7, -4]
//
// See `tf.scatter_nd` for more details about how to make updates to
// slices.
//
// Arguments:
//	ref: A resource handle. Must be from a VarHandleOp.
//	indices: A Tensor. Must be one of the following types: int32, int64.
// A tensor of indices into ref.
//	updates: A Tensor. Must have the same type as ref. A tensor of
// values to subtract from ref.
//
// Returns the created operation.
func ResourceScatterNdSub(scope *Scope, ref tf.Output, indices tf.Output, updates tf.Output, optional ...ResourceScatterNdSubAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ResourceScatterNdSub",
		Input: []tf.Input{
			ref, indices, updates,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}
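
// An illustrative sketch (it assumes NewScope, Const, and VarHandleOp from
// this package) mirroring the Python example above: four scattered elements
// of an 8-element int32 resource variable are decremented. Initializing the
// variable and running the graph are omitted.
func exampleResourceScatterNdSub() {
	s := NewScope()
	ref := VarHandleOp(s, tf.Int32, tf.MakeShape(8))
	indices := Const(s, [][]int32{{4}, {3}, {1}, {7}})
	updates := Const(s, []int32{9, 10, 11, 12})
	step := ResourceScatterNdSub(s, ref, indices, updates)
	_ = step
}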

// Computes the minimum along segments of a tensor.
//
// Read
// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)
// for an explanation of segments.
//
// This operator is similar to the unsorted segment sum operator found
// [(here)](../../../api_docs/python/math_ops.md#UnsortedSegmentSum).
// Instead of computing the sum over segments, it computes the minimum such that:
//
// \\(output_i = \min_{j...} data[j...]\\) where min is over tuples `j...` such
// that `segment_ids[j...] == i`.
//
// If the minimum is empty for a given segment ID `i`, it outputs the largest
// possible value for the specific numeric type,
// `output[i] = numeric_limits<T>::max()`.
//
// For example:
//
// ``` python
// c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]])
// tf.unsorted_segment_min(c, tf.constant([0, 1, 0]), num_segments=2)
// # ==> [[1, 2, 2, 1],
// #      [5, 6, 7, 8]]
// ```
//
// If the given segment ID `i` is negative, then the corresponding value is
// dropped, and will not be included in the result.
//
// Arguments:
//
//	segment_ids: A tensor whose shape is a prefix of `data.shape`.
//
//
// Returns Has same shape as data, except for the first `segment_ids.rank`
// dimensions, which are replaced with a single dimension which has size
// `num_segments`.
func UnsortedSegmentMin(scope *Scope, data tf.Output, segment_ids tf.Output, num_segments tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "UnsortedSegmentMin",
		Input: []tf.Input{
			data, segment_ids, num_segments,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
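
// An illustrative sketch (it assumes NewScope and Const from this package)
// matching the example above: rows 0 and 2 fall into segment 0 and row 1 into
// segment 1, so the output is the elementwise minimum per segment.
func exampleUnsortedSegmentMin() {
	s := NewScope()
	c := Const(s, [][]int32{{1, 2, 3, 4}, {5, 6, 7, 8}, {4, 3, 2, 1}})
	segmentIds := Const(s, []int32{0, 1, 0})
	numSegments := Const(s, int32(2))
	out := UnsortedSegmentMin(s, c, segmentIds, numSegments) // [[1 2 2 1] [5 6 7 8]]
	_ = out
}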

// Computes the minimum along segments of a tensor.
//
// Read
// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)
// for an explanation of segments.
//
// Computes a tensor such that
// \\(output_i = \min_j(data_j)\\) where `min` is over `j` such
// that `segment_ids[j] == i`.
//
// If the min is empty for a given segment ID `i`, `output[i] = 0`.
//
// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
// <img style="width:100%" src="https://www.tensorflow.org/images/SegmentMin.png" alt>
// </div>
//
// For example:
//
// ```
// c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]])
// tf.segment_min(c, tf.constant([0, 0, 1]))
// # ==> [[1, 2, 2, 1],
// #      [5, 6, 7, 8]]
// ```
//
// Arguments:
//
//	segment_ids: A 1-D tensor whose size is equal to the size of `data`'s
// first dimension.  Values should be sorted and can be repeated.
//
// Returns Has same shape as data, except for dimension 0 which
// has size `k`, the number of segments.
func SegmentMin(scope *Scope, data tf.Output, segment_ids tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "SegmentMin",
		Input: []tf.Input{
			data, segment_ids,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Execute a sub graph on a remote processor.
//
// The graph specifications (such as the graph itself, input tensors, and output names)
// are stored as a serialized protocol buffer of RemoteFusedGraphExecuteInfo
// as serialized_remote_fused_graph_execute_info.
// The specifications will be passed to a dedicated registered
// remote fused graph executor.  The executor will send the graph specifications
// to a remote processor and execute that graph.  The execution results
// will be passed to consumer nodes as outputs of this node.
//
// Arguments:
//	inputs: Arbitrary number of tensors with arbitrary data types
//
//	serialized_remote_fused_graph_execute_info: Serialized protocol buffer
// of RemoteFusedGraphExecuteInfo which contains graph specifications.
//
// Returns Arbitrary number of tensors with arbitrary data types
func RemoteFusedGraphExecute(scope *Scope, inputs []tf.Output, Toutputs []tf.DataType, serialized_remote_fused_graph_execute_info string) (outputs []tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"Toutputs": Toutputs, "serialized_remote_fused_graph_execute_info": serialized_remote_fused_graph_execute_info}
	opspec := tf.OpSpec{
		Type: "RemoteFusedGraphExecute",
		Input: []tf.Input{
			tf.OutputList(inputs),
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	if scope.Err() != nil {
		return
	}
	var idx int
	var err error
	if outputs, idx, err = makeOutputList(op, idx, "outputs"); err != nil {
		scope.UpdateErr("RemoteFusedGraphExecute", err)
		return
	}
	return outputs
}

// Creates and returns an empty tensor map.
//
// handle: an empty tensor map
func EmptyTensorMap(scope *Scope) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "EmptyTensorMap",
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// DatasetToGraphAttr is an optional argument to DatasetToGraph.
type DatasetToGraphAttr func(optionalAttr)

// DatasetToGraphStatefulWhitelist sets the optional stateful_whitelist attribute to value.
// If not specified, defaults to <>
//
// REQUIRES: len(value) >= 0
func DatasetToGraphStatefulWhitelist(value []string) DatasetToGraphAttr {
	return func(m optionalAttr) {
		m["stateful_whitelist"] = value
	}
}

// DatasetToGraphAllowStateful sets the optional allow_stateful attribute to value.
// If not specified, defaults to false
func DatasetToGraphAllowStateful(value bool) DatasetToGraphAttr {
	return func(m optionalAttr) {
		m["allow_stateful"] = value
	}
}

// DatasetToGraphStripDeviceAssignment sets the optional strip_device_assignment attribute to value.
// If not specified, defaults to false
func DatasetToGraphStripDeviceAssignment(value bool) DatasetToGraphAttr {
	return func(m optionalAttr) {
		m["strip_device_assignment"] = value
	}
}

// Returns a serialized GraphDef representing `input_dataset`.
//
// Returns a graph representation for `input_dataset`.
//
// Arguments:
//	input_dataset: A variant tensor representing the dataset to return the graph representation for.
//
// Returns The graph representation of the dataset (as serialized GraphDef).
func DatasetToGraph(scope *Scope, input_dataset tf.Output, optional ...DatasetToGraphAttr) (graph tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "DatasetToGraph",
		Input: []tf.Input{
			input_dataset,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// ResizeNearestNeighborAttr is an optional argument to ResizeNearestNeighbor.
type ResizeNearestNeighborAttr func(optionalAttr)

// ResizeNearestNeighborAlignCorners sets the optional align_corners attribute to value.
//
// value: If true, the centers of the 4 corner pixels of the input and output tensors are
// aligned, preserving the values at the corner pixels. Defaults to false.
// If not specified, defaults to false
func ResizeNearestNeighborAlignCorners(value bool) ResizeNearestNeighborAttr {
	return func(m optionalAttr) {
		m["align_corners"] = value
	}
}

// ResizeNearestNeighborHalfPixelCenters sets the optional half_pixel_centers attribute to value.
// If not specified, defaults to false
func ResizeNearestNeighborHalfPixelCenters(value bool) ResizeNearestNeighborAttr {
	return func(m optionalAttr) {
		m["half_pixel_centers"] = value
	}
}

// Resize `images` to `size` using nearest neighbor interpolation.
//
// Arguments:
//	images: 4-D with shape `[batch, height, width, channels]`.
//	size: A 1-D int32 Tensor of 2 elements: `new_height, new_width`.  The
// new size for the images.
//
// Returns 4-D with shape
// `[batch, new_height, new_width, channels]`.
func ResizeNearestNeighbor(scope *Scope, images tf.Output, size tf.Output, optional ...ResizeNearestNeighborAttr) (resized_images tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ResizeNearestNeighbor",
		Input: []tf.Input{
			images, size,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
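
// An illustrative sketch (it assumes NewScope and Const from this package): a
// single 2x2 single-channel image is upsampled to 4x4 with nearest-neighbor
// interpolation, using half-pixel centers.
func exampleResizeNearestNeighbor() {
	s := NewScope()
	images := Const(s, [][][][]float32{{
		{{1}, {2}},
		{{3}, {4}},
	}}) // shape [1, 2, 2, 1]
	size := Const(s, []int32{4, 4})
	resized := ResizeNearestNeighbor(s, images, size, ResizeNearestNeighborHalfPixelCenters(true))
	_ = resized // shape [1, 4, 4, 1]
}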

// A placeholder op for a value that will be fed into the computation.
//
// Arguments:
//	dtype: The type of elements in the tensor.
//	shape: The shape of the tensor.
//
// Returns A tensor that will be provided using the infeed mechanism.
func InfeedDequeue(scope *Scope, dtype tf.DataType, shape tf.Shape) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"dtype": dtype, "shape": shape}
	opspec := tf.OpSpec{
		Type: "InfeedDequeue",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Encodes a `RaggedTensor` into a `variant` Tensor.
//
// Encodes the given `RaggedTensor` and returns a `variant` Tensor. If
// `batched_input` is True, then the input `RaggedTensor` is unbatched along the
// zero-th dimension, each component `RaggedTensor` is encoded into a scalar
// `variant` Tensor, and these are stacked to return a 1-D `variant` Tensor.
// If `batched_input` is False, then the input `RaggedTensor` is encoded as is and
// a scalar `variant` Tensor is returned. A `RaggedTensor` is encoded by first
// creating a 1-D `variant` Tensor with `ragged_rank + 1` elements, containing the
// splits and values Tensors of the `RaggedTensor`. Then the 1-D `variant` Tensor
// is wrapped in a scalar `variant` Tensor. See `RaggedTensorFromVariant` for the
// corresponding decoding logic.
//
// Arguments:
//	rt_nested_splits: A list of one or more Tensors representing the splits of the input
// `RaggedTensor`.
//	rt_dense_values: A Tensor representing the values of the input `RaggedTensor`.
//	batched_input: A `bool` denoting whether the input is a batched `RaggedTensor`.
//
// Returns A `variant` Tensor containing the encoded `RaggedTensor`.
func RaggedTensorToVariant(scope *Scope, rt_nested_splits []tf.Output, rt_dense_values tf.Output, batched_input bool) (encoded_ragged tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"batched_input": batched_input}
	opspec := tf.OpSpec{
		Type: "RaggedTensorToVariant",
		Input: []tf.Input{
			tf.OutputList(rt_nested_splits), rt_dense_values,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// ResourceApplyKerasMomentumAttr is an optional argument to ResourceApplyKerasMomentum.
type ResourceApplyKerasMomentumAttr func(optionalAttr)

// ResourceApplyKerasMomentumUseLocking sets the optional use_locking attribute to value.
//
// value: If `True`, updating of the var and accum tensors will be protected
// by a lock; otherwise the behavior is undefined, but may exhibit less
// contention.
// If not specified, defaults to false
func ResourceApplyKerasMomentumUseLocking(value bool) ResourceApplyKerasMomentumAttr {
	return func(m optionalAttr) {
		m["use_locking"] = value
	}
}

// ResourceApplyKerasMomentumUseNesterov sets the optional use_nesterov attribute to value.
//
// value: If `True`, the tensor passed to compute grad will be
// var + momentum * accum, so in the end, the var you get is actually
// var + momentum * accum.
// If not specified, defaults to false
func ResourceApplyKerasMomentumUseNesterov(value bool) ResourceApplyKerasMomentumAttr {
	return func(m optionalAttr) {
		m["use_nesterov"] = value
	}
}

// Update '*var' according to the momentum scheme.
//
// Set use_nesterov = True if you want to use Nesterov momentum.
//
// accum = accum * momentum - lr * grad
// var += accum
//
// Arguments:
//	var_: Should be from a Variable().
//	accum: Should be from a Variable().
//	lr: Scaling factor. Must be a scalar.
//	grad: The gradient.
//	momentum: Momentum. Must be a scalar.
//
// Returns the created operation.
func ResourceApplyKerasMomentum(scope *Scope, var_ tf.Output, accum tf.Output, lr tf.Output, grad tf.Output, momentum tf.Output, optional ...ResourceApplyKerasMomentumAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ResourceApplyKerasMomentum",
		Input: []tf.Input{
			var_, accum, lr, grad, momentum,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}
42297
42298// ResourceSparseApplyMomentumAttr is an optional argument to ResourceSparseApplyMomentum.
42299type ResourceSparseApplyMomentumAttr func(optionalAttr)
42300
42301// ResourceSparseApplyMomentumUseLocking sets the optional use_locking attribute to value.
42302//
42303// value: If `True`, updating of the var and accum tensors will be protected
42304// by a lock; otherwise the behavior is undefined, but may exhibit less
42305// contention.
42306// If not specified, defaults to false
42307func ResourceSparseApplyMomentumUseLocking(value bool) ResourceSparseApplyMomentumAttr {
42308	return func(m optionalAttr) {
42309		m["use_locking"] = value
42310	}
42311}
42312
42313// ResourceSparseApplyMomentumUseNesterov sets the optional use_nesterov attribute to value.
42314//
42315// value: If `True`, the tensor passed to compute grad will be
42316// var - lr * momentum * accum, so in the end, the var you get is actually
42317// var - lr * momentum * accum.
42318// If not specified, defaults to false
42319func ResourceSparseApplyMomentumUseNesterov(value bool) ResourceSparseApplyMomentumAttr {
42320	return func(m optionalAttr) {
42321		m["use_nesterov"] = value
42322	}
42323}
42324
42325// Update relevant entries in '*var' and '*accum' according to the momentum scheme.
42326//
42327// Set use_nesterov = True if you want to use Nesterov momentum.
42328//
42329	// That is, for rows for which we have grad, we update var and accum as follows:
42330//
42331// accum = accum * momentum + grad
42332// var -= lr * accum
42333//
42334// Arguments:
42335//	var_: Should be from a Variable().
42336//	accum: Should be from a Variable().
42337//	lr: Learning rate. Must be a scalar.
42338//	grad: The gradient.
42339//	indices: A vector of indices into the first dimension of var and accum.
42340//	momentum: Momentum. Must be a scalar.
42341//
42342// Returns the created operation.
42343func ResourceSparseApplyMomentum(scope *Scope, var_ tf.Output, accum tf.Output, lr tf.Output, grad tf.Output, indices tf.Output, momentum tf.Output, optional ...ResourceSparseApplyMomentumAttr) (o *tf.Operation) {
42344	if scope.Err() != nil {
42345		return
42346	}
42347	attrs := map[string]interface{}{}
42348	for _, a := range optional {
42349		a(attrs)
42350	}
42351	opspec := tf.OpSpec{
42352		Type: "ResourceSparseApplyMomentum",
42353		Input: []tf.Input{
42354			var_, accum, lr, grad, indices, momentum,
42355		},
42356		Attrs: attrs,
42357	}
42358	return scope.AddOperation(opspec)
42359}
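
// A plain-Go sketch of the sparse variant above (illustrative only; variable,
// accum, grad [][]float64, indices []int, and lr, momentum float64 are
// hypothetical host-side values):
//
//	// For each row j in indices: accum = accum * momentum + grad; var -= lr * accum
//	for j, row := range indices {
//		for k := range accum[row] {
//			accum[row][k] = accum[row][k]*momentum + grad[j][k]
//			variable[row][k] -= lr * accum[row][k]
//		}
//	}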
42360
42361// RecvAttr is an optional argument to Recv.
42362type RecvAttr func(optionalAttr)
42363
42364// RecvClientTerminated sets the optional client_terminated attribute to value.
42365//
42366// value: If set to true, this indicates that the node was added
42367// to the graph as a result of a client-side feed or fetch of Tensor data,
42368// in which case the corresponding send or recv is expected to be managed
42369// locally by the caller.
42370// If not specified, defaults to false
42371func RecvClientTerminated(value bool) RecvAttr {
42372	return func(m optionalAttr) {
42373		m["client_terminated"] = value
42374	}
42375}
42376
42377// Receives the named tensor from send_device on recv_device.
42378//
42379// Arguments:
42380//
42381//	tensor_name: The name of the tensor to receive.
42382//	send_device: The name of the device sending the tensor.
42383//	send_device_incarnation: The current incarnation of send_device.
42384//	recv_device: The name of the device receiving the tensor.
42385//
42386// Returns The tensor to receive.
42387func Recv(scope *Scope, tensor_type tf.DataType, tensor_name string, send_device string, send_device_incarnation int64, recv_device string, optional ...RecvAttr) (tensor tf.Output) {
42388	if scope.Err() != nil {
42389		return
42390	}
42391	attrs := map[string]interface{}{"tensor_type": tensor_type, "tensor_name": tensor_name, "send_device": send_device, "send_device_incarnation": send_device_incarnation, "recv_device": recv_device}
42392	for _, a := range optional {
42393		a(attrs)
42394	}
42395	opspec := tf.OpSpec{
42396		Type: "Recv",
42397
42398		Attrs: attrs,
42399	}
42400	op := scope.AddOperation(opspec)
42401	return op.Output(0)
42402}
42403
42404// UniqueWithCountsAttr is an optional argument to UniqueWithCounts.
42405type UniqueWithCountsAttr func(optionalAttr)
42406
42407// UniqueWithCountsOutIdx sets the optional out_idx attribute to value.
42408// If not specified, defaults to DT_INT32
42409func UniqueWithCountsOutIdx(value tf.DataType) UniqueWithCountsAttr {
42410	return func(m optionalAttr) {
42411		m["out_idx"] = value
42412	}
42413}
42414
42415// Finds unique elements in a 1-D tensor.
42416//
42417	// This operation returns a tensor `y` containing all of the unique elements of `x`
42418	// in the order that they first occur in `x`. This operation also returns a
42419// tensor `idx` the same size as `x` that contains the index of each value of `x`
42420// in the unique output `y`. Finally, it returns a third tensor `count` that
42421// contains the count of each element of `y` in `x`. In other words:
42422//
42423	// `y[idx[i]] = x[i] for i in [0, 1, ..., len(x) - 1]`
42424//
42425// For example:
42426//
42427// ```
42428// # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]
42429// y, idx, count = unique_with_counts(x)
42430// y ==> [1, 2, 4, 7, 8]
42431// idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]
42432// count ==> [2, 1, 3, 1, 2]
42433// ```
42434//
42435// Arguments:
42436//	x: 1-D.
42437//
42438// Returns:
42439//	y: 1-D.
42440//	idx: 1-D.
42441//	count: 1-D.
42442func UniqueWithCounts(scope *Scope, x tf.Output, optional ...UniqueWithCountsAttr) (y tf.Output, idx tf.Output, count tf.Output) {
42443	if scope.Err() != nil {
42444		return
42445	}
42446	attrs := map[string]interface{}{}
42447	for _, a := range optional {
42448		a(attrs)
42449	}
42450	opspec := tf.OpSpec{
42451		Type: "UniqueWithCounts",
42452		Input: []tf.Input{
42453			x,
42454		},
42455		Attrs: attrs,
42456	}
42457	op := scope.AddOperation(opspec)
42458	return op.Output(0), op.Output(1), op.Output(2)
42459}
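
// The semantics above can be sketched in plain Go (illustrative only; x is a
// hypothetical []int, and the map tracks each value's position in y):
//
//	y, count := []int{}, []int{}
//	idx := make([]int, len(x))
//	seen := map[int]int{} // value -> index in y
//	for i, v := range x {
//		p, ok := seen[v]
//		if !ok {
//			p = len(y)
//			seen[v] = p
//			y = append(y, v)
//			count = append(count, 0)
//		}
//		idx[i] = p
//		count[p]++
//	}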
42460
42461// ResizeBicubicGradAttr is an optional argument to ResizeBicubicGrad.
42462type ResizeBicubicGradAttr func(optionalAttr)
42463
42464// ResizeBicubicGradAlignCorners sets the optional align_corners attribute to value.
42465//
42466// value: If true, the centers of the 4 corner pixels of the input and grad tensors are
42467// aligned. Defaults to false.
42468// If not specified, defaults to false
42469func ResizeBicubicGradAlignCorners(value bool) ResizeBicubicGradAttr {
42470	return func(m optionalAttr) {
42471		m["align_corners"] = value
42472	}
42473}
42474
42475// ResizeBicubicGradHalfPixelCenters sets the optional half_pixel_centers attribute to value.
42476// If not specified, defaults to false
42477func ResizeBicubicGradHalfPixelCenters(value bool) ResizeBicubicGradAttr {
42478	return func(m optionalAttr) {
42479		m["half_pixel_centers"] = value
42480	}
42481}
42482
42483// Computes the gradient of bicubic interpolation.
42484//
42485// Arguments:
42486//	grads: 4-D with shape `[batch, height, width, channels]`.
42487//	original_image: 4-D with shape `[batch, orig_height, orig_width, channels]`,
42488// The image tensor that was resized.
42489//
42490// Returns 4-D with shape `[batch, orig_height, orig_width, channels]`.
42491// Gradients with respect to the input image. Input image must have been
42492// float or double.
42493func ResizeBicubicGrad(scope *Scope, grads tf.Output, original_image tf.Output, optional ...ResizeBicubicGradAttr) (output tf.Output) {
42494	if scope.Err() != nil {
42495		return
42496	}
42497	attrs := map[string]interface{}{}
42498	for _, a := range optional {
42499		a(attrs)
42500	}
42501	opspec := tf.OpSpec{
42502		Type: "ResizeBicubicGrad",
42503		Input: []tf.Input{
42504			grads, original_image,
42505		},
42506		Attrs: attrs,
42507	}
42508	op := scope.AddOperation(opspec)
42509	return op.Output(0)
42510}
42511
42512// TensorListConcatAttr is an optional argument to TensorListConcat.
42513type TensorListConcatAttr func(optionalAttr)
42514
42515// TensorListConcatElementShape sets the optional element_shape attribute to value.
42516// If not specified, defaults to <unknown_rank:true >
42517func TensorListConcatElementShape(value tf.Shape) TensorListConcatAttr {
42518	return func(m optionalAttr) {
42519		m["element_shape"] = value
42520	}
42521}
42522
42523	// Concatenates all tensors in the list along the 0th dimension.
42524//
42525// Requires that all tensors have the same shape except the first dimension.
42526//
42527// input_handle: The input list.
42528	// tensor: The concatenated result.
42529// lengths: Output tensor containing sizes of the 0th dimension of tensors in the list, used for computing the gradient.
42530//
42531func TensorListConcat(scope *Scope, input_handle tf.Output, element_dtype tf.DataType, optional ...TensorListConcatAttr) (tensor tf.Output, lengths tf.Output) {
42532	if scope.Err() != nil {
42533		return
42534	}
42535	attrs := map[string]interface{}{"element_dtype": element_dtype}
42536	for _, a := range optional {
42537		a(attrs)
42538	}
42539	opspec := tf.OpSpec{
42540		Type: "TensorListConcat",
42541		Input: []tf.Input{
42542			input_handle,
42543		},
42544		Attrs: attrs,
42545	}
42546	op := scope.AddOperation(opspec)
42547	return op.Output(0), op.Output(1)
42548}
42549
42550// Returns immutable tensor from memory region.
42551//
42552// The current implementation memmaps the tensor from a file.
42553//
42554// Arguments:
42555//	dtype: Type of the returned tensor.
42556//	shape: Shape of the returned tensor.
42557//	memory_region_name: Name of readonly memory region used by the tensor, see
42558// NewReadOnlyMemoryRegionFromFile in tensorflow::Env.
42559func ImmutableConst(scope *Scope, dtype tf.DataType, shape tf.Shape, memory_region_name string) (tensor tf.Output) {
42560	if scope.Err() != nil {
42561		return
42562	}
42563	attrs := map[string]interface{}{"dtype": dtype, "shape": shape, "memory_region_name": memory_region_name}
42564	opspec := tf.OpSpec{
42565		Type: "ImmutableConst",
42566
42567		Attrs: attrs,
42568	}
42569	op := scope.AddOperation(opspec)
42570	return op.Output(0)
42571}
42572
42573// Add the quantile summaries to each quantile stream resource.
42574//
42575// An op that adds a list of quantile summaries to a quantile stream resource. Each
42576// summary Tensor is rank 2, containing summaries (value, weight, min_rank, max_rank)
42577// for a single feature.
42578//
42579// Arguments:
42580//	quantile_stream_resource_handle: resource handle referring to a QuantileStreamResource.
42581	//	summaries: List of rank 2 Tensors, each containing the summaries for a single feature.
42582//
42583// Returns the created operation.
42584func BoostedTreesQuantileStreamResourceAddSummaries(scope *Scope, quantile_stream_resource_handle tf.Output, summaries []tf.Output) (o *tf.Operation) {
42585	if scope.Err() != nil {
42586		return
42587	}
42588	opspec := tf.OpSpec{
42589		Type: "BoostedTreesQuantileStreamResourceAddSummaries",
42590		Input: []tf.Input{
42591			quantile_stream_resource_handle, tf.OutputList(summaries),
42592		},
42593	}
42594	return scope.AddOperation(opspec)
42595}
42596
42597// LoadTPUEmbeddingRMSPropParametersGradAccumDebugAttr is an optional argument to LoadTPUEmbeddingRMSPropParametersGradAccumDebug.
42598type LoadTPUEmbeddingRMSPropParametersGradAccumDebugAttr func(optionalAttr)
42599
42600// LoadTPUEmbeddingRMSPropParametersGradAccumDebugTableId sets the optional table_id attribute to value.
42601// If not specified, defaults to -1
42602func LoadTPUEmbeddingRMSPropParametersGradAccumDebugTableId(value int64) LoadTPUEmbeddingRMSPropParametersGradAccumDebugAttr {
42603	return func(m optionalAttr) {
42604		m["table_id"] = value
42605	}
42606}
42607
42608// LoadTPUEmbeddingRMSPropParametersGradAccumDebugTableName sets the optional table_name attribute to value.
42609// If not specified, defaults to ""
42610func LoadTPUEmbeddingRMSPropParametersGradAccumDebugTableName(value string) LoadTPUEmbeddingRMSPropParametersGradAccumDebugAttr {
42611	return func(m optionalAttr) {
42612		m["table_name"] = value
42613	}
42614}
42615
42616// LoadTPUEmbeddingRMSPropParametersGradAccumDebugConfig sets the optional config attribute to value.
42617// If not specified, defaults to ""
42618func LoadTPUEmbeddingRMSPropParametersGradAccumDebugConfig(value string) LoadTPUEmbeddingRMSPropParametersGradAccumDebugAttr {
42619	return func(m optionalAttr) {
42620		m["config"] = value
42621	}
42622}
42623
42624// Load RMSProp embedding parameters with debug support.
42625//
42626// An op that loads optimization parameters into HBM for embedding. Must be
42627// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
42628// embedding table configuration. For example, this op is used to install
42629// parameters that are loaded from a checkpoint before a training loop is
42630// executed.
42631//
42632// Arguments:
42633//	parameters: Value of parameters used in the RMSProp optimization algorithm.
42634//	ms: Value of ms used in the RMSProp optimization algorithm.
42635//	mom: Value of mom used in the RMSProp optimization algorithm.
42636//	gradient_accumulators: Value of gradient_accumulators used in the RMSProp optimization algorithm.
42637//
42638//
42639//
42640// Returns the created operation.
42641func LoadTPUEmbeddingRMSPropParametersGradAccumDebug(scope *Scope, parameters tf.Output, ms tf.Output, mom tf.Output, gradient_accumulators tf.Output, num_shards int64, shard_id int64, optional ...LoadTPUEmbeddingRMSPropParametersGradAccumDebugAttr) (o *tf.Operation) {
42642	if scope.Err() != nil {
42643		return
42644	}
42645	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
42646	for _, a := range optional {
42647		a(attrs)
42648	}
42649	opspec := tf.OpSpec{
42650		Type: "LoadTPUEmbeddingRMSPropParametersGradAccumDebug",
42651		Input: []tf.Input{
42652			parameters, ms, mom, gradient_accumulators,
42653		},
42654		Attrs: attrs,
42655	}
42656	return scope.AddOperation(opspec)
42657}
42658
42659// LoadTPUEmbeddingAdadeltaParametersAttr is an optional argument to LoadTPUEmbeddingAdadeltaParameters.
42660type LoadTPUEmbeddingAdadeltaParametersAttr func(optionalAttr)
42661
42662// LoadTPUEmbeddingAdadeltaParametersTableId sets the optional table_id attribute to value.
42663// If not specified, defaults to -1
42664func LoadTPUEmbeddingAdadeltaParametersTableId(value int64) LoadTPUEmbeddingAdadeltaParametersAttr {
42665	return func(m optionalAttr) {
42666		m["table_id"] = value
42667	}
42668}
42669
42670// LoadTPUEmbeddingAdadeltaParametersTableName sets the optional table_name attribute to value.
42671// If not specified, defaults to ""
42672func LoadTPUEmbeddingAdadeltaParametersTableName(value string) LoadTPUEmbeddingAdadeltaParametersAttr {
42673	return func(m optionalAttr) {
42674		m["table_name"] = value
42675	}
42676}
42677
42678// LoadTPUEmbeddingAdadeltaParametersConfig sets the optional config attribute to value.
42679// If not specified, defaults to ""
42680func LoadTPUEmbeddingAdadeltaParametersConfig(value string) LoadTPUEmbeddingAdadeltaParametersAttr {
42681	return func(m optionalAttr) {
42682		m["config"] = value
42683	}
42684}
42685
42686// Load Adadelta embedding parameters.
42687//
42688// An op that loads optimization parameters into HBM for embedding. Must be
42689// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
42690// embedding table configuration. For example, this op is used to install
42691// parameters that are loaded from a checkpoint before a training loop is
42692// executed.
42693//
42694// Arguments:
42695//	parameters: Value of parameters used in the Adadelta optimization algorithm.
42696//	accumulators: Value of accumulators used in the Adadelta optimization algorithm.
42697//	updates: Value of updates used in the Adadelta optimization algorithm.
42698//
42699//
42700//
42701// Returns the created operation.
42702func LoadTPUEmbeddingAdadeltaParameters(scope *Scope, parameters tf.Output, accumulators tf.Output, updates tf.Output, num_shards int64, shard_id int64, optional ...LoadTPUEmbeddingAdadeltaParametersAttr) (o *tf.Operation) {
42703	if scope.Err() != nil {
42704		return
42705	}
42706	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
42707	for _, a := range optional {
42708		a(attrs)
42709	}
42710	opspec := tf.OpSpec{
42711		Type: "LoadTPUEmbeddingAdadeltaParameters",
42712		Input: []tf.Input{
42713			parameters, accumulators, updates,
42714		},
42715		Attrs: attrs,
42716	}
42717	return scope.AddOperation(opspec)
42718}
42719
42720// LoadTPUEmbeddingFTRLParametersGradAccumDebugAttr is an optional argument to LoadTPUEmbeddingFTRLParametersGradAccumDebug.
42721type LoadTPUEmbeddingFTRLParametersGradAccumDebugAttr func(optionalAttr)
42722
42723// LoadTPUEmbeddingFTRLParametersGradAccumDebugTableId sets the optional table_id attribute to value.
42724// If not specified, defaults to -1
42725func LoadTPUEmbeddingFTRLParametersGradAccumDebugTableId(value int64) LoadTPUEmbeddingFTRLParametersGradAccumDebugAttr {
42726	return func(m optionalAttr) {
42727		m["table_id"] = value
42728	}
42729}
42730
42731// LoadTPUEmbeddingFTRLParametersGradAccumDebugTableName sets the optional table_name attribute to value.
42732// If not specified, defaults to ""
42733func LoadTPUEmbeddingFTRLParametersGradAccumDebugTableName(value string) LoadTPUEmbeddingFTRLParametersGradAccumDebugAttr {
42734	return func(m optionalAttr) {
42735		m["table_name"] = value
42736	}
42737}
42738
42739// LoadTPUEmbeddingFTRLParametersGradAccumDebugConfig sets the optional config attribute to value.
42740// If not specified, defaults to ""
42741func LoadTPUEmbeddingFTRLParametersGradAccumDebugConfig(value string) LoadTPUEmbeddingFTRLParametersGradAccumDebugAttr {
42742	return func(m optionalAttr) {
42743		m["config"] = value
42744	}
42745}
42746
42747// Load FTRL embedding parameters with debug support.
42748//
42749// An op that loads optimization parameters into HBM for embedding. Must be
42750// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
42751// embedding table configuration. For example, this op is used to install
42752// parameters that are loaded from a checkpoint before a training loop is
42753// executed.
42754//
42755// Arguments:
42756//	parameters: Value of parameters used in the FTRL optimization algorithm.
42757//	accumulators: Value of accumulators used in the FTRL optimization algorithm.
42758//	linears: Value of linears used in the FTRL optimization algorithm.
42759//	gradient_accumulators: Value of gradient_accumulators used in the FTRL optimization algorithm.
42760//
42761//
42762//
42763// Returns the created operation.
42764func LoadTPUEmbeddingFTRLParametersGradAccumDebug(scope *Scope, parameters tf.Output, accumulators tf.Output, linears tf.Output, gradient_accumulators tf.Output, num_shards int64, shard_id int64, optional ...LoadTPUEmbeddingFTRLParametersGradAccumDebugAttr) (o *tf.Operation) {
42765	if scope.Err() != nil {
42766		return
42767	}
42768	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
42769	for _, a := range optional {
42770		a(attrs)
42771	}
42772	opspec := tf.OpSpec{
42773		Type: "LoadTPUEmbeddingFTRLParametersGradAccumDebug",
42774		Input: []tf.Input{
42775			parameters, accumulators, linears, gradient_accumulators,
42776		},
42777		Attrs: attrs,
42778	}
42779	return scope.AddOperation(opspec)
42780}
42781
42782// ResourceSparseApplyFtrlV2Attr is an optional argument to ResourceSparseApplyFtrlV2.
42783type ResourceSparseApplyFtrlV2Attr func(optionalAttr)
42784
42785// ResourceSparseApplyFtrlV2UseLocking sets the optional use_locking attribute to value.
42786//
42787// value: If `True`, updating of the var and accum tensors will be protected
42788// by a lock; otherwise the behavior is undefined, but may exhibit less
42789// contention.
42790// If not specified, defaults to false
42791func ResourceSparseApplyFtrlV2UseLocking(value bool) ResourceSparseApplyFtrlV2Attr {
42792	return func(m optionalAttr) {
42793		m["use_locking"] = value
42794	}
42795}
42796
42797// ResourceSparseApplyFtrlV2MultiplyLinearByLr sets the optional multiply_linear_by_lr attribute to value.
42798// If not specified, defaults to false
42799func ResourceSparseApplyFtrlV2MultiplyLinearByLr(value bool) ResourceSparseApplyFtrlV2Attr {
42800	return func(m optionalAttr) {
42801		m["multiply_linear_by_lr"] = value
42802	}
42803}
42804
42805// Update relevant entries in '*var' according to the Ftrl-proximal scheme.
42806//
42807	// That is, for rows for which we have grad, we update var, accum and linear as follows:
42808// grad_with_shrinkage = grad + 2 * l2_shrinkage * var
42809// accum_new = accum + grad_with_shrinkage * grad_with_shrinkage
42810// linear += grad_with_shrinkage +
42811//     (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
42812// quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
42813// var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
42814// accum = accum_new
42815//
42816// Arguments:
42817//	var_: Should be from a Variable().
42818//	accum: Should be from a Variable().
42819//	linear: Should be from a Variable().
42820//	grad: The gradient.
42821//	indices: A vector of indices into the first dimension of var and accum.
42822//	lr: Scaling factor. Must be a scalar.
42823//	l1: L1 regularization. Must be a scalar.
42824	//	l2: L2 regularization. Must be a scalar.
42825	//	l2_shrinkage: L2 shrinkage regularization. Must be a scalar.
42826	//	lr_power: Scaling factor. Must be a scalar.
42827//
42828// Returns the created operation.
42829func ResourceSparseApplyFtrlV2(scope *Scope, var_ tf.Output, accum tf.Output, linear tf.Output, grad tf.Output, indices tf.Output, lr tf.Output, l1 tf.Output, l2 tf.Output, l2_shrinkage tf.Output, lr_power tf.Output, optional ...ResourceSparseApplyFtrlV2Attr) (o *tf.Operation) {
42830	if scope.Err() != nil {
42831		return
42832	}
42833	attrs := map[string]interface{}{}
42834	for _, a := range optional {
42835		a(attrs)
42836	}
42837	opspec := tf.OpSpec{
42838		Type: "ResourceSparseApplyFtrlV2",
42839		Input: []tf.Input{
42840			var_, accum, linear, grad, indices, lr, l1, l2, l2_shrinkage, lr_power,
42841		},
42842		Attrs: attrs,
42843	}
42844	return scope.AddOperation(opspec)
42845}
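
// Per element, the Ftrl-proximal update above corresponds to this plain-Go
// sketch (illustrative only; v, accum, linear, grad and the scalars are
// hypothetical float64 values, sign is a hypothetical helper returning ±1,
// and "math" is assumed imported):
//
//	gws := grad + 2*l2Shrinkage*v // grad_with_shrinkage
//	accumNew := accum + gws*gws
//	linear += gws + (math.Pow(accumNew, -lrPower)-math.Pow(accum, -lrPower))/lr*v
//	quadratic := 1.0/(math.Pow(accumNew, lrPower)*lr) + 2*l2
//	if math.Abs(linear) > l1 {
//		v = (sign(linear)*l1 - linear) / quadratic
//	} else {
//		v = 0
//	}
//	accum = accumNew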
42846
42847// Computes the LSTM cell backward propagation for the entire time sequence.
42848//
42849	// This implementation is to be used in conjunction with BlockLSTM.
42850//
42851// Arguments:
42852//	seq_len_max: Maximum time length actually used by this input. Outputs are padded
42853// with zeros beyond this length.
42854//	x: The sequence input to the LSTM, shape (timelen, batch_size, num_inputs).
42855//	cs_prev: Value of the initial cell state.
42856//	h_prev: Initial output of cell (to be used for peephole).
42857//	w: The weight matrix.
42858//	wci: The weight matrix for input gate peephole connection.
42859//	wcf: The weight matrix for forget gate peephole connection.
42860//	wco: The weight matrix for output gate peephole connection.
42861//	b: The bias vector.
42862//	i: The input gate over the whole time sequence.
42863//	cs: The cell state before the tanh over the whole time sequence.
42864//	f: The forget gate over the whole time sequence.
42865//	o: The output gate over the whole time sequence.
42866//	ci: The cell input over the whole time sequence.
42867//	co: The cell after the tanh over the whole time sequence.
42868//	h: The output h vector over the whole time sequence.
42869//	cs_grad: The current gradient of cs.
42870//	h_grad: The gradient of h vector.
42871//	use_peephole: Whether to use peephole weights.
42872//
42873// Returns:
42874//	x_grad: The gradient of x to be back-propped.
42875//	cs_prev_grad: The gradient of cs_prev to be back-propped.
42876//	h_prev_grad: The gradient of h_prev to be back-propped.
42877//	w_grad: The gradient for w to be back-propped.
42878//	wci_grad: The gradient for wci to be back-propped.
42879//	wcf_grad: The gradient for wcf to be back-propped.
42880//	wco_grad: The gradient for wco to be back-propped.
42881	//	b_grad: The gradient for b to be back-propped.
42882func BlockLSTMGrad(scope *Scope, seq_len_max tf.Output, x tf.Output, cs_prev tf.Output, h_prev tf.Output, w tf.Output, wci tf.Output, wcf tf.Output, wco tf.Output, b tf.Output, i tf.Output, cs tf.Output, f tf.Output, o tf.Output, ci tf.Output, co tf.Output, h tf.Output, cs_grad tf.Output, h_grad tf.Output, use_peephole bool) (x_grad tf.Output, cs_prev_grad tf.Output, h_prev_grad tf.Output, w_grad tf.Output, wci_grad tf.Output, wcf_grad tf.Output, wco_grad tf.Output, b_grad tf.Output) {
42883	if scope.Err() != nil {
42884		return
42885	}
42886	attrs := map[string]interface{}{"use_peephole": use_peephole}
42887	opspec := tf.OpSpec{
42888		Type: "BlockLSTMGrad",
42889		Input: []tf.Input{
42890			seq_len_max, x, cs_prev, h_prev, w, wci, wcf, wco, b, i, cs, f, o, ci, co, h, cs_grad, h_grad,
42891		},
42892		Attrs: attrs,
42893	}
42894	op := scope.AddOperation(opspec)
42895	return op.Output(0), op.Output(1), op.Output(2), op.Output(3), op.Output(4), op.Output(5), op.Output(6), op.Output(7)
42896}
42897
42898// Inserts a dimension of 1 into a tensor's shape.
42899//
42900// Given a tensor `input`, this operation inserts a dimension of 1 at the
42901// dimension index `axis` of `input`'s shape. The dimension index `axis` starts at
42902// zero; if you specify a negative number for `axis` it is counted backward from
42903// the end.
42904//
42905// This operation is useful if you want to add a batch dimension to a single
42906// element. For example, if you have a single image of shape `[height, width,
42907// channels]`, you can make it a batch of 1 image with `expand_dims(image, 0)`,
42908// which will make the shape `[1, height, width, channels]`.
42909//
42910// Other examples:
42911//
42912// ```
42913// # 't' is a tensor of shape [2]
42914// shape(expand_dims(t, 0)) ==> [1, 2]
42915// shape(expand_dims(t, 1)) ==> [2, 1]
42916// shape(expand_dims(t, -1)) ==> [2, 1]
42917//
42918// # 't2' is a tensor of shape [2, 3, 5]
42919// shape(expand_dims(t2, 0)) ==> [1, 2, 3, 5]
42920// shape(expand_dims(t2, 2)) ==> [2, 3, 1, 5]
42921// shape(expand_dims(t2, 3)) ==> [2, 3, 5, 1]
42922// ```
42923//
42924// This operation requires that:
42925//
42926	// `-1-input.dims() <= axis <= input.dims()`
42927//
42928// This operation is related to `squeeze()`, which removes dimensions of
42929// size 1.
42930//
42931// Arguments:
42932//
42933//	axis: 0-D (scalar). Specifies the dimension index at which to
42934// expand the shape of `input`. Must be in the range
42935// `[-rank(input) - 1, rank(input)]`.
42936//
42937// Returns Contains the same data as `input`, but its shape has an additional
42938// dimension of size 1 added.
42939func ExpandDims(scope *Scope, input tf.Output, axis tf.Output) (output tf.Output) {
42940	if scope.Err() != nil {
42941		return
42942	}
42943	opspec := tf.OpSpec{
42944		Type: "ExpandDims",
42945		Input: []tf.Input{
42946			input, axis,
42947		},
42948	}
42949	op := scope.AddOperation(opspec)
42950	return op.Output(0)
42951}
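
// The shape arithmetic above amounts to this plain-Go sketch (illustrative
// only; expandShape is a hypothetical helper, not part of this package):
//
//	func expandShape(shape []int, axis int) []int {
//		if axis < 0 {
//			axis += len(shape) + 1 // count backward from the end
//		}
//		out := append([]int{}, shape[:axis]...)
//		out = append(out, 1)
//		return append(out, shape[axis:]...)
//	}
//
//	expandShape([]int{2, 3, 5}, 2)  // [2 3 1 5]
//	expandShape([]int{2, 3, 5}, -1) // [2 3 5 1]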
42952
42953// ResourceSparseApplyProximalGradientDescentAttr is an optional argument to ResourceSparseApplyProximalGradientDescent.
42954type ResourceSparseApplyProximalGradientDescentAttr func(optionalAttr)
42955
42956// ResourceSparseApplyProximalGradientDescentUseLocking sets the optional use_locking attribute to value.
42957//
42958// value: If True, the subtraction will be protected by a lock;
42959// otherwise the behavior is undefined, but may exhibit less contention.
42960// If not specified, defaults to false
42961func ResourceSparseApplyProximalGradientDescentUseLocking(value bool) ResourceSparseApplyProximalGradientDescentAttr {
42962	return func(m optionalAttr) {
42963		m["use_locking"] = value
42964	}
42965}
42966
42967	// Sparse update of '*var' using the FOBOS algorithm with a fixed learning rate.
42968//
42969	// That is, for rows for which we have grad, we update var as follows:
42970// prox_v = var - alpha * grad
42971// var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0}
42972//
42973// Arguments:
42974//	var_: Should be from a Variable().
42975//	alpha: Scaling factor. Must be a scalar.
42976//	l1: L1 regularization. Must be a scalar.
42977//	l2: L2 regularization. Must be a scalar.
42978//	grad: The gradient.
42979//	indices: A vector of indices into the first dimension of var and accum.
42980//
42981// Returns the created operation.
42982func ResourceSparseApplyProximalGradientDescent(scope *Scope, var_ tf.Output, alpha tf.Output, l1 tf.Output, l2 tf.Output, grad tf.Output, indices tf.Output, optional ...ResourceSparseApplyProximalGradientDescentAttr) (o *tf.Operation) {
42983	if scope.Err() != nil {
42984		return
42985	}
42986	attrs := map[string]interface{}{}
42987	for _, a := range optional {
42988		a(attrs)
42989	}
42990	opspec := tf.OpSpec{
42991		Type: "ResourceSparseApplyProximalGradientDescent",
42992		Input: []tf.Input{
42993			var_, alpha, l1, l2, grad, indices,
42994		},
42995		Attrs: attrs,
42996	}
42997	return scope.AddOperation(opspec)
42998}
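
// Per element, the FOBOS update above corresponds to this plain-Go sketch
// (illustrative only; v, g and the scalars are hypothetical float64 values,
// sign is a hypothetical helper returning ±1, and "math" is assumed imported):
//
//	proxV := v - alpha*g
//	shrunk := math.Max(math.Abs(proxV)-alpha*l1, 0)
//	v = sign(proxV) / (1 + alpha*l2) * shrunk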
42999
43000// SparseMatrixTransposeAttr is an optional argument to SparseMatrixTranspose.
43001type SparseMatrixTransposeAttr func(optionalAttr)
43002
43003// SparseMatrixTransposeConjugate sets the optional conjugate attribute to value.
43004//
43005// value: Indicates whether `input` should be conjugated.
43006// If not specified, defaults to false
43007func SparseMatrixTransposeConjugate(value bool) SparseMatrixTransposeAttr {
43008	return func(m optionalAttr) {
43009		m["conjugate"] = value
43010	}
43011}
43012
43013// Transposes the inner (matrix) dimensions of a CSRSparseMatrix.
43014//
43015// Transposes the inner (matrix) dimensions of a SparseMatrix and optionally
43016// conjugates its values.
43017//
43018// Arguments:
43019//	input: A CSRSparseMatrix.
43020//
43021//
43022// Returns A CSRSparseMatrix.
43023func SparseMatrixTranspose(scope *Scope, input tf.Output, type_ tf.DataType, optional ...SparseMatrixTransposeAttr) (output tf.Output) {
43024	if scope.Err() != nil {
43025		return
43026	}
43027	attrs := map[string]interface{}{"type": type_}
43028	for _, a := range optional {
43029		a(attrs)
43030	}
43031	opspec := tf.OpSpec{
43032		Type: "SparseMatrixTranspose",
43033		Input: []tf.Input{
43034			input,
43035		},
43036		Attrs: attrs,
43037	}
43038	op := scope.AddOperation(opspec)
43039	return op.Output(0)
43040}
43041
43042// LoadTPUEmbeddingProximalAdagradParametersAttr is an optional argument to LoadTPUEmbeddingProximalAdagradParameters.
43043type LoadTPUEmbeddingProximalAdagradParametersAttr func(optionalAttr)
43044
43045// LoadTPUEmbeddingProximalAdagradParametersTableId sets the optional table_id attribute to value.
43046// If not specified, defaults to -1
43047func LoadTPUEmbeddingProximalAdagradParametersTableId(value int64) LoadTPUEmbeddingProximalAdagradParametersAttr {
43048	return func(m optionalAttr) {
43049		m["table_id"] = value
43050	}
43051}
43052
43053// LoadTPUEmbeddingProximalAdagradParametersTableName sets the optional table_name attribute to value.
43054// If not specified, defaults to ""
43055func LoadTPUEmbeddingProximalAdagradParametersTableName(value string) LoadTPUEmbeddingProximalAdagradParametersAttr {
43056	return func(m optionalAttr) {
43057		m["table_name"] = value
43058	}
43059}
43060
43061// LoadTPUEmbeddingProximalAdagradParametersConfig sets the optional config attribute to value.
43062// If not specified, defaults to ""
43063func LoadTPUEmbeddingProximalAdagradParametersConfig(value string) LoadTPUEmbeddingProximalAdagradParametersAttr {
43064	return func(m optionalAttr) {
43065		m["config"] = value
43066	}
43067}
43068
43069// Load proximal Adagrad embedding parameters.
43070//
43071// An op that loads optimization parameters into HBM for embedding. Must be
43072// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
43073// embedding table configuration. For example, this op is used to install
43074// parameters that are loaded from a checkpoint before a training loop is
43075// executed.
43076//
43077// Arguments:
43078//	parameters: Value of parameters used in the proximal Adagrad optimization algorithm.
43079//	accumulators: Value of accumulators used in the proximal Adagrad optimization algorithm.
43080//
43081//
43082//
43083// Returns the created operation.
43084func LoadTPUEmbeddingProximalAdagradParameters(scope *Scope, parameters tf.Output, accumulators tf.Output, num_shards int64, shard_id int64, optional ...LoadTPUEmbeddingProximalAdagradParametersAttr) (o *tf.Operation) {
43085	if scope.Err() != nil {
43086		return
43087	}
43088	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
43089	for _, a := range optional {
43090		a(attrs)
43091	}
43092	opspec := tf.OpSpec{
43093		Type: "LoadTPUEmbeddingProximalAdagradParameters",
43094		Input: []tf.Input{
43095			parameters, accumulators,
43096		},
43097		Attrs: attrs,
43098	}
43099	return scope.AddOperation(opspec)
43100}
43101
43102// ResourceSparseApplyAdagradDAAttr is an optional argument to ResourceSparseApplyAdagradDA.
43103type ResourceSparseApplyAdagradDAAttr func(optionalAttr)
43104
43105// ResourceSparseApplyAdagradDAUseLocking sets the optional use_locking attribute to value.
43106//
43107// value: If True, updating of the var and accum tensors will be protected by
43108// a lock; otherwise the behavior is undefined, but may exhibit less contention.
43109// If not specified, defaults to false
43110func ResourceSparseApplyAdagradDAUseLocking(value bool) ResourceSparseApplyAdagradDAAttr {
43111	return func(m optionalAttr) {
43112		m["use_locking"] = value
43113	}
43114}
43115
43116	// Update entries in '*var' and '*accum' according to the proximal Adagrad scheme.
43117//
43118// Arguments:
43119//	var_: Should be from a Variable().
43120//	gradient_accumulator: Should be from a Variable().
43121//	gradient_squared_accumulator: Should be from a Variable().
43122//	grad: The gradient.
43123//	indices: A vector of indices into the first dimension of var and accum.
43124//	lr: Learning rate. Must be a scalar.
43125//	l1: L1 regularization. Must be a scalar.
43126//	l2: L2 regularization. Must be a scalar.
43127//	global_step: Training step number. Must be a scalar.
43128//
43129// Returns the created operation.
43130func ResourceSparseApplyAdagradDA(scope *Scope, var_ tf.Output, gradient_accumulator tf.Output, gradient_squared_accumulator tf.Output, grad tf.Output, indices tf.Output, lr tf.Output, l1 tf.Output, l2 tf.Output, global_step tf.Output, optional ...ResourceSparseApplyAdagradDAAttr) (o *tf.Operation) {
43131	if scope.Err() != nil {
43132		return
43133	}
43134	attrs := map[string]interface{}{}
43135	for _, a := range optional {
43136		a(attrs)
43137	}
43138	opspec := tf.OpSpec{
43139		Type: "ResourceSparseApplyAdagradDA",
43140		Input: []tf.Input{
43141			var_, gradient_accumulator, gradient_squared_accumulator, grad, indices, lr, l1, l2, global_step,
43142		},
43143		Attrs: attrs,
43144	}
43145	return scope.AddOperation(opspec)
43146}
43147
43148// Adjust the hue of one or more images.
43149//
43150// `images` is a tensor of at least 3 dimensions.  The last dimension is
43151// interpreted as channels, and must be three.
43152//
43153// The input image is considered in the RGB colorspace. Conceptually, the RGB
43154	// colors are first mapped into HSV. A delta is then applied to all the hue values,
43155	// and the values are then remapped back to the RGB colorspace.
43156//
43157// Arguments:
43158//	images: Images to adjust.  At least 3-D.
43159//	delta: A float delta to add to the hue.
43160//
43161// Returns The hue-adjusted image or images.
43162func AdjustHue(scope *Scope, images tf.Output, delta tf.Output) (output tf.Output) {
43163	if scope.Err() != nil {
43164		return
43165	}
43166	opspec := tf.OpSpec{
43167		Type: "AdjustHue",
43168		Input: []tf.Input{
43169			images, delta,
43170		},
43171	}
43172	op := scope.AddOperation(opspec)
43173	return op.Output(0)
43174}
43175
43176// Computes hyperbolic cosine of x element-wise.
43177//
43178//   Given an input tensor, this function computes hyperbolic cosine of every
43179//   element in the tensor. Input range is `[-inf, inf]` and output range
43180//   is `[1, inf]`.
43181//
43182//   ```python
43183//   x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 2, 10, float("inf")])
43184//   tf.math.cosh(x) ==> [inf 4.0515420e+03 1.1276259e+00 1.5430807e+00 1.8106556e+00 3.7621956e+00 1.1013233e+04 inf]
43185//   ```
43186func Cosh(scope *Scope, x tf.Output) (y tf.Output) {
43187	if scope.Err() != nil {
43188		return
43189	}
43190	opspec := tf.OpSpec{
43191		Type: "Cosh",
43192		Input: []tf.Input{
43193			x,
43194		},
43195	}
43196	op := scope.AddOperation(opspec)
43197	return op.Output(0)
43198}
43199
43200// CollectiveReduceAttr is an optional argument to CollectiveReduce.
43201type CollectiveReduceAttr func(optionalAttr)
43202
43203// CollectiveReduceWaitFor sets the optional wait_for attribute to value.
43204// If not specified, defaults to <>
43205func CollectiveReduceWaitFor(value []int64) CollectiveReduceAttr {
43206	return func(m optionalAttr) {
43207		m["wait_for"] = value
43208	}
43209}
43210
43211// CollectiveReduceCommunicationHint sets the optional communication_hint attribute to value.
43212// If not specified, defaults to "auto"
43213func CollectiveReduceCommunicationHint(value string) CollectiveReduceAttr {
43214	return func(m optionalAttr) {
43215		m["communication_hint"] = value
43216	}
43217}
43218
43219// CollectiveReduceTimeoutSeconds sets the optional timeout_seconds attribute to value.
43220// If not specified, defaults to 0
43221func CollectiveReduceTimeoutSeconds(value float32) CollectiveReduceAttr {
43222	return func(m optionalAttr) {
43223		m["timeout_seconds"] = value
43224	}
43225}
43226
43227// Mutually reduces multiple tensors of identical type and shape.
43228func CollectiveReduce(scope *Scope, input tf.Output, group_size int64, group_key int64, instance_key int64, merge_op string, final_op string, subdiv_offsets []int64, optional ...CollectiveReduceAttr) (data tf.Output) {
43229	if scope.Err() != nil {
43230		return
43231	}
43232	attrs := map[string]interface{}{"group_size": group_size, "group_key": group_key, "instance_key": instance_key, "merge_op": merge_op, "final_op": final_op, "subdiv_offsets": subdiv_offsets}
43233	for _, a := range optional {
43234		a(attrs)
43235	}
43236	opspec := tf.OpSpec{
43237		Type: "CollectiveReduce",
43238		Input: []tf.Input{
43239			input,
43240		},
43241		Attrs: attrs,
43242	}
43243	op := scope.AddOperation(opspec)
43244	return op.Output(0)
43245}
43246
43247// ResourceApplyAdaMaxAttr is an optional argument to ResourceApplyAdaMax.
43248type ResourceApplyAdaMaxAttr func(optionalAttr)
43249
43250// ResourceApplyAdaMaxUseLocking sets the optional use_locking attribute to value.
43251//
43252// value: If `True`, updating of the var, m, and v tensors will be protected
43253// by a lock; otherwise the behavior is undefined, but may exhibit less
43254// contention.
43255// If not specified, defaults to false
43256func ResourceApplyAdaMaxUseLocking(value bool) ResourceApplyAdaMaxAttr {
43257	return func(m optionalAttr) {
43258		m["use_locking"] = value
43259	}
43260}
43261
43262// Update '*var' according to the AdaMax algorithm.
43263//
43264// m_t <- beta1 * m_{t-1} + (1 - beta1) * g
43265// v_t <- max(beta2 * v_{t-1}, abs(g))
43266// variable <- variable - learning_rate / (1 - beta1^t) * m_t / (v_t + epsilon)
43267//
43268// Arguments:
43269//	var_: Should be from a Variable().
43270//	m: Should be from a Variable().
43271//	v: Should be from a Variable().
43272//	beta1_power: Must be a scalar.
43273//	lr: Scaling factor. Must be a scalar.
43274//	beta1: Momentum factor. Must be a scalar.
43275//	beta2: Momentum factor. Must be a scalar.
43276//	epsilon: Ridge term. Must be a scalar.
43277//	grad: The gradient.
43278//
43279// Returns the created operation.
43280func ResourceApplyAdaMax(scope *Scope, var_ tf.Output, m tf.Output, v tf.Output, beta1_power tf.Output, lr tf.Output, beta1 tf.Output, beta2 tf.Output, epsilon tf.Output, grad tf.Output, optional ...ResourceApplyAdaMaxAttr) (o *tf.Operation) {
43281	if scope.Err() != nil {
43282		return
43283	}
43284	attrs := map[string]interface{}{}
43285	for _, a := range optional {
43286		a(attrs)
43287	}
43288	opspec := tf.OpSpec{
43289		Type: "ResourceApplyAdaMax",
43290		Input: []tf.Input{
43291			var_, m, v, beta1_power, lr, beta1, beta2, epsilon, grad,
43292		},
43293		Attrs: attrs,
43294	}
43295	return scope.AddOperation(opspec)
43296}
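
// Per element, the AdaMax update above corresponds to this plain-Go sketch
// (illustrative only; the variables are hypothetical float64 values and
// "math" is assumed imported):
//
//	m = beta1*m + (1-beta1)*g
//	v = math.Max(beta2*v, math.Abs(g))
//	variable -= lr / (1 - beta1Power) * m / (v + epsilon)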
43297
43298// Computes the LSTM cell backward propagation for the entire time sequence.
43299//
43300	// This implementation is to be used in conjunction with BlockLSTMV2.
43301//
43302// Arguments:
43303//	seq_len_max: Maximum time length actually used by this input. Outputs are padded
43304// with zeros beyond this length.
43305//	x: The sequence input to the LSTM, shape (timelen, batch_size, num_inputs).
43306//	cs_prev: Value of the initial cell state.
43307//	h_prev: Initial output of cell (to be used for peephole).
43308//	w: The weight matrix.
43309//	wci: The weight matrix for input gate peephole connection.
43310//	wcf: The weight matrix for forget gate peephole connection.
43311//	wco: The weight matrix for output gate peephole connection.
43312//	b: The bias vector.
43313//	i: The input gate over the whole time sequence.
43314//	cs: The cell state before the tanh over the whole time sequence.
43315//	f: The forget gate over the whole time sequence.
43316//	o: The output gate over the whole time sequence.
43317//	ci: The cell input over the whole time sequence.
43318//	co: The cell after the tanh over the whole time sequence.
43319//	h: The output h vector over the whole time sequence.
43320//	cs_grad: The current gradient of cs.
43321//	h_grad: The gradient of h vector.
43322//	use_peephole: Whether to use peephole weights.
43323//
43324// Returns:
43325//	x_grad: The gradient of x to be back-propped.
43326//	cs_prev_grad: The gradient of cs_prev to be back-propped.
43327//	h_prev_grad: The gradient of h_prev to be back-propped.
43328//	w_grad: The gradient for w to be back-propped.
43329//	wci_grad: The gradient for wci to be back-propped.
43330//	wcf_grad: The gradient for wcf to be back-propped.
43331//	wco_grad: The gradient for wco to be back-propped.
43332	//	b_grad: The gradient for b to be back-propped.
43333func BlockLSTMGradV2(scope *Scope, seq_len_max tf.Output, x tf.Output, cs_prev tf.Output, h_prev tf.Output, w tf.Output, wci tf.Output, wcf tf.Output, wco tf.Output, b tf.Output, i tf.Output, cs tf.Output, f tf.Output, o tf.Output, ci tf.Output, co tf.Output, h tf.Output, cs_grad tf.Output, h_grad tf.Output, use_peephole bool) (x_grad tf.Output, cs_prev_grad tf.Output, h_prev_grad tf.Output, w_grad tf.Output, wci_grad tf.Output, wcf_grad tf.Output, wco_grad tf.Output, b_grad tf.Output) {
43334	if scope.Err() != nil {
43335		return
43336	}
43337	attrs := map[string]interface{}{"use_peephole": use_peephole}
43338	opspec := tf.OpSpec{
43339		Type: "BlockLSTMGradV2",
43340		Input: []tf.Input{
43341			seq_len_max, x, cs_prev, h_prev, w, wci, wcf, wco, b, i, cs, f, o, ci, co, h, cs_grad, h_grad,
43342		},
43343		Attrs: attrs,
43344	}
43345	op := scope.AddOperation(opspec)
43346	return op.Output(0), op.Output(1), op.Output(2), op.Output(3), op.Output(4), op.Output(5), op.Output(6), op.Output(7)
43347}
43348
43349// Returns the element-wise max of two SparseTensors.
43350//
43351// Assumes the two SparseTensors have the same shape, i.e., no broadcasting.
43352//
43353// Arguments:
43354//	a_indices: 2-D.  `N x R` matrix with the indices of non-empty values in a
43355// SparseTensor, in the canonical lexicographic ordering.
43356//	a_values: 1-D.  `N` non-empty values corresponding to `a_indices`.
43357//	a_shape: 1-D.  Shape of the input SparseTensor.
43358//	b_indices: counterpart to `a_indices` for the other operand.
43359//	b_values: counterpart to `a_values` for the other operand; must be of the same dtype.
43360//	b_shape: counterpart to `a_shape` for the other operand; the two shapes must be equal.
43361//
43362// Returns:
43363//	output_indices: 2-D.  The indices of the output SparseTensor.
43364//	output_values: 1-D.  The values of the output SparseTensor.
43365func SparseSparseMaximum(scope *Scope, a_indices tf.Output, a_values tf.Output, a_shape tf.Output, b_indices tf.Output, b_values tf.Output, b_shape tf.Output) (output_indices tf.Output, output_values tf.Output) {
43366	if scope.Err() != nil {
43367		return
43368	}
43369	opspec := tf.OpSpec{
43370		Type: "SparseSparseMaximum",
43371		Input: []tf.Input{
43372			a_indices, a_values, a_shape, b_indices, b_values, b_shape,
43373		},
43374	}
43375	op := scope.AddOperation(opspec)
43376	return op.Output(0), op.Output(1)
43377}
43378
43379// QuantizeV2Attr is an optional argument to QuantizeV2.
43380type QuantizeV2Attr func(optionalAttr)
43381
43382// QuantizeV2Mode sets the optional mode attribute to value.
43383// If not specified, defaults to "MIN_COMBINED"
43384func QuantizeV2Mode(value string) QuantizeV2Attr {
43385	return func(m optionalAttr) {
43386		m["mode"] = value
43387	}
43388}
43389
43390// QuantizeV2RoundMode sets the optional round_mode attribute to value.
43391// If not specified, defaults to "HALF_AWAY_FROM_ZERO"
43392func QuantizeV2RoundMode(value string) QuantizeV2Attr {
43393	return func(m optionalAttr) {
43394		m["round_mode"] = value
43395	}
43396}
43397
43398// QuantizeV2NarrowRange sets the optional narrow_range attribute to value.
43399// If not specified, defaults to false
43400func QuantizeV2NarrowRange(value bool) QuantizeV2Attr {
43401	return func(m optionalAttr) {
43402		m["narrow_range"] = value
43403	}
43404}
43405
43406// QuantizeV2Axis sets the optional axis attribute to value.
43407// If not specified, defaults to -1
43408func QuantizeV2Axis(value int64) QuantizeV2Attr {
43409	return func(m optionalAttr) {
43410		m["axis"] = value
43411	}
43412}
43413
43414// QuantizeV2EnsureMinimumRange sets the optional ensure_minimum_range attribute to value.
43415// If not specified, defaults to 0.01
43416func QuantizeV2EnsureMinimumRange(value float32) QuantizeV2Attr {
43417	return func(m optionalAttr) {
43418		m["ensure_minimum_range"] = value
43419	}
43420}
43421
43422// Quantize the 'input' tensor of type float to 'output' tensor of type 'T'.
43423//
43424// [min_range, max_range] are scalar floats that specify the range for
43425// the 'input' data. The 'mode' attribute controls exactly which calculations are
43426// used to convert the float values to their quantized equivalents.  The
43427// 'round_mode' attribute controls which rounding tie-breaking algorithm is used
43428// when rounding float values to their quantized equivalents.
43429//
43430// In 'MIN_COMBINED' mode, each value of the tensor will undergo the following:
43431//
43432// ```
43433// out[i] = (in[i] - min_range) * range(T) / (max_range - min_range)
43434// if T == qint8: out[i] -= (range(T) + 1) / 2.0
43435// ```
43436//
43437// here `range(T) = numeric_limits<T>::max() - numeric_limits<T>::min()`
43438//
43439// *MIN_COMBINED Mode Example*
43440//
43441// Assume the input is type float and has a possible range of [0.0, 6.0] and the
43442// output type is quint8 ([0, 255]). The min_range and max_range values should be
43443// specified as 0.0 and 6.0. Quantizing from float to quint8 will multiply each
43444// value of the input by 255/6 and cast to quint8.
43445//
43446// If the output type was qint8 ([-128, 127]), the operation will additionally
43447// subtract each value by 128 prior to casting, so that the range of values aligns
43448// with the range of qint8.
43449//
43450// If the mode is 'MIN_FIRST', then this approach is used:
43451//
43452// ```
43453// num_discrete_values = 1 << (# of bits in T)
43454// range_adjust = num_discrete_values / (num_discrete_values - 1)
43455// range = (range_max - range_min) * range_adjust
43456// range_scale = num_discrete_values / range
43457// quantized = round(input * range_scale) - round(range_min * range_scale) +
43458//   numeric_limits<T>::min()
43459// quantized = max(quantized, numeric_limits<T>::min())
43460// quantized = min(quantized, numeric_limits<T>::max())
43461// ```
43462//
43463// The biggest difference between this and MIN_COMBINED is that the minimum range
43464// is rounded first, before it's subtracted from the rounded value. With
43465// MIN_COMBINED, a small bias is introduced where repeated iterations of quantizing
43466// and dequantizing will introduce a larger and larger error.
43467//
43468// *SCALED mode Example*
43469//
43470// `SCALED` mode matches the quantization approach used in
43471// `QuantizeAndDequantize{V2|V3}`.
43472//
43473// If the mode is `SCALED`, the quantization is performed by multiplying each
43474// input value by a scaling_factor.
43475// The scaling_factor is determined from `min_range` and `max_range` to be as large
43476// as possible such that the range from `min_range` to `max_range` is representable
43477// within values of type T.
43478//
43479// ```c++
43480//
43481//   const int min_T = std::numeric_limits<T>::min();
43482//   const int max_T = std::numeric_limits<T>::max();
43483//   const float max_float = std::numeric_limits<float>::max();
43484//
43485//   const float scale_factor_from_min_side =
43486//       (min_T * min_range > 0) ? min_T / min_range : max_float;
43487//   const float scale_factor_from_max_side =
43488//       (max_T * max_range > 0) ? max_T / max_range : max_float;
43489//
43490//   const float scale_factor = std::min(scale_factor_from_min_side,
43491//                                       scale_factor_from_max_side);
43492// ```
43493//
43494// We next use the scale_factor to adjust min_range and max_range as follows:
43495//
43496// ```c++
43497//       min_range = min_T / scale_factor;
43498//       max_range = max_T / scale_factor;
43499// ```
43500//
43501//
43502	// e.g. if T = qint8, and initially min_range = -10 and max_range = 9, we would
43503	// compare -128/-10.0 = 12.8 to 127/9.0 = 14.11, and set scaling_factor = 12.8.
43504	// In this case, min_range would remain -10, but max_range would be adjusted to
43505	// 127 / 12.8 = 9.921875.
43506//
43507// So we will quantize input values in the range (-10, 9.921875) to (-128, 127).
43508//
43509// The input tensor can now be quantized by clipping values to the range
43510// `min_range` to `max_range`, then multiplying by scale_factor as follows:
43511//
43512// ```c++
43513// result = round(min(max_range, max(min_range, input)) * scale_factor)
43514// ```
43515//
43516// The adjusted `min_range` and `max_range` are returned as outputs 2 and 3 of
43517// this operation. These outputs should be used as the range for any further
43518// calculations.
43519//
43520//
43521// *narrow_range (bool) attribute*
43522//
43523// If true, we do not use the minimum quantized value.
43524	// i.e. for an int8 quantized output, values are restricted to the range
43525// -127..127 instead of the full -128..127 range.
43526// This is provided for compatibility with certain inference backends.
43527// (Only applies to SCALED mode)
43528//
43529//
43530// *axis (int) attribute*
43531//
43532// An optional `axis` attribute can specify a dimension index of the input tensor,
43533// such that quantization ranges will be calculated and applied separately for each
43534// slice of the tensor along that dimension. This is useful for per-channel
43535// quantization.
43536//
43537	// If `axis` is specified, min_range and max_range must be 1-D tensors whose
43538	// size matches the `axis` dimension of the input and output tensors.
43539	// If `axis` is not specified, per-tensor quantization is performed as normal.
43540//
43541//
43542// *ensure_minimum_range (float) attribute*
43543//
43544// Ensures the minimum quantization range is at least this value.
43545// The legacy default value for this is 0.01, but it is strongly suggested to
43546// set it to 0 for new uses.
43547//
43548//
43549// Arguments:
43550//
43551//	min_range: The minimum value of the quantization range. This value may be adjusted by the
43552// op depending on other parameters. The adjusted value is written to `output_min`.
43553// If the `axis` attribute is specified, this must be a 1-D tensor whose size
43554// matches the `axis` dimension of the input and output tensors.
43555//	max_range: The maximum value of the quantization range. This value may be adjusted by the
43556// op depending on other parameters. The adjusted value is written to `output_max`.
43557// If the `axis` attribute is specified, this must be a 1-D tensor whose size
43558// matches the `axis` dimension of the input and output tensors.
43559//
43560//
43561// Returns:
43562//	output: The quantized data produced from the float input.
43563//	output_min: The final quantization range minimum, used to clip input values before scaling
43564// and rounding them to quantized values.
43565// If the `axis` attribute is specified, this will be a 1-D tensor whose size
43566// matches the `axis` dimension of the input and output tensors.
43567//	output_max: The final quantization range maximum, used to clip input values before scaling
43568// and rounding them to quantized values.
43569// If the `axis` attribute is specified, this will be a 1-D tensor whose size
43570// matches the `axis` dimension of the input and output tensors.
43571func QuantizeV2(scope *Scope, input tf.Output, min_range tf.Output, max_range tf.Output, T tf.DataType, optional ...QuantizeV2Attr) (output tf.Output, output_min tf.Output, output_max tf.Output) {
43572	if scope.Err() != nil {
43573		return
43574	}
43575	attrs := map[string]interface{}{"T": T}
43576	for _, a := range optional {
43577		a(attrs)
43578	}
43579	opspec := tf.OpSpec{
43580		Type: "QuantizeV2",
43581		Input: []tf.Input{
43582			input, min_range, max_range,
43583		},
43584		Attrs: attrs,
43585	}
43586	op := scope.AddOperation(opspec)
43587	return op.Output(0), op.Output(1), op.Output(2)
43588}
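
// The SCALED-mode scale selection above translates to this plain-Go sketch
// (illustrative only, for T = qint8; minRange, maxRange and input are
// hypothetical float64 values and "math" is assumed imported):
//
//	const minT, maxT = -128.0, 127.0
//	scaleFromMin, scaleFromMax := math.MaxFloat64, math.MaxFloat64
//	if minT*minRange > 0 {
//		scaleFromMin = minT / minRange
//	}
//	if maxT*maxRange > 0 {
//		scaleFromMax = maxT / maxRange
//	}
//	scale := math.Min(scaleFromMin, scaleFromMax)
//	minRange, maxRange = minT/scale, maxT/scale // e.g. -10, 9 -> -10, 9.921875
//	result := math.Round(math.Min(maxRange, math.Max(minRange, input)) * scale)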
43589
43590// Returns the truth value of (x >= y) element-wise.
43591//
43592// *NOTE*: `GreaterEqual` supports broadcasting. More about broadcasting
43593// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
43594//
43595// Example:
43596//
43597// ```python
43598// x = tf.constant([5, 4, 6, 7])
43599// y = tf.constant([5, 2, 5, 10])
43600// tf.math.greater_equal(x, y) ==> [True, True, True, False]
43601//
43602// x = tf.constant([5, 4, 6, 7])
43603// y = tf.constant([5])
43604// tf.math.greater_equal(x, y) ==> [True, False, True, True]
43605// ```
43606func GreaterEqual(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
43607	if scope.Err() != nil {
43608		return
43609	}
43610	opspec := tf.OpSpec{
43611		Type: "GreaterEqual",
43612		Input: []tf.Input{
43613			x, y,
43614		},
43615	}
43616	op := scope.AddOperation(opspec)
43617	return op.Output(0)
43618}
43619
43620// BatchAttr is an optional argument to Batch.
43621type BatchAttr func(optionalAttr)
43622
43623// BatchMaxEnqueuedBatches sets the optional max_enqueued_batches attribute to value.
43624// If not specified, defaults to 10
43625func BatchMaxEnqueuedBatches(value int64) BatchAttr {
43626	return func(m optionalAttr) {
43627		m["max_enqueued_batches"] = value
43628	}
43629}
43630
43631// BatchAllowedBatchSizes sets the optional allowed_batch_sizes attribute to value.
43632// If not specified, defaults to <>
43633func BatchAllowedBatchSizes(value []int64) BatchAttr {
43634	return func(m optionalAttr) {
43635		m["allowed_batch_sizes"] = value
43636	}
43637}
43638
43639// BatchContainer sets the optional container attribute to value.
43640// If not specified, defaults to ""
43641func BatchContainer(value string) BatchAttr {
43642	return func(m optionalAttr) {
43643		m["container"] = value
43644	}
43645}
43646
43647// BatchSharedName sets the optional shared_name attribute to value.
43648// If not specified, defaults to ""
43649func BatchSharedName(value string) BatchAttr {
43650	return func(m optionalAttr) {
43651		m["shared_name"] = value
43652	}
43653}
43654
43655// BatchBatchingQueue sets the optional batching_queue attribute to value.
43656// If not specified, defaults to ""
43657func BatchBatchingQueue(value string) BatchAttr {
43658	return func(m optionalAttr) {
43659		m["batching_queue"] = value
43660	}
43661}
43662
43663// Batches all input tensors nondeterministically.
43664//
43665// When many instances of this Op are being run concurrently with the same
43666// container/shared_name in the same device, some will output zero-shaped Tensors
43667// and others will output Tensors of size up to max_batch_size.
43668//
43669// All Tensors in in_tensors are batched together (so, for example, labels and
43670	// features should be batched with a single instance of this operation).
43671//
43672// Each invocation of batch emits an `id` scalar which will be used to identify
43673// this particular invocation when doing unbatch or its gradient.
43674//
43675// Each op which emits a non-empty batch will also emit a non-empty batch_index
43676	// Tensor, which is a [K, 3] matrix where each row contains the invocation's id,
43677// start, and length of elements of each set of Tensors present in batched_tensors.
43678//
43679// Batched tensors are concatenated along the first dimension, and all tensors in
43680// in_tensors must have the first dimension of the same size.
43681//
43682// in_tensors: The tensors to be batched.
43683// num_batch_threads: Number of scheduling threads for processing batches of work.
43684//  Determines the number of batches processed in parallel.
43685// max_batch_size: Batch sizes will never be bigger than this.
43686// batch_timeout_micros: Maximum number of microseconds to wait before outputting
43687//  an incomplete batch.
43688// allowed_batch_sizes: Optional list of allowed batch sizes. If left empty, does
43689//  nothing. Otherwise, supplies a list of batch sizes, causing the op to pad
43690//  batches up to one of those sizes. The entries must increase monotonically, and
43691//  the final entry must equal max_batch_size.
43692// grad_timeout_micros: The timeout to use for the gradient. See Unbatch.
43693// batched_tensors: Either empty tensors or a batch of concatenated Tensors.
// batch_index: If batched_tensors is non-empty, has information to invert it.
43695// container: Controls the scope of sharing of this batch.
43696// id: always contains a scalar with a unique ID for this invocation of Batch.
43697// shared_name: Concurrently running instances of batch in the same device with the
43698//  same container and shared_name will batch their elements together. If left
43699//  empty, the op name will be used as the shared name.
43700// T: the types of tensors to be batched.
43701func Batch(scope *Scope, in_tensors []tf.Output, num_batch_threads int64, max_batch_size int64, batch_timeout_micros int64, grad_timeout_micros int64, optional ...BatchAttr) (batched_tensors []tf.Output, batch_index tf.Output, id tf.Output) {
43702	if scope.Err() != nil {
43703		return
43704	}
43705	attrs := map[string]interface{}{"num_batch_threads": num_batch_threads, "max_batch_size": max_batch_size, "batch_timeout_micros": batch_timeout_micros, "grad_timeout_micros": grad_timeout_micros}
43706	for _, a := range optional {
43707		a(attrs)
43708	}
43709	opspec := tf.OpSpec{
43710		Type: "Batch",
43711		Input: []tf.Input{
43712			tf.OutputList(in_tensors),
43713		},
43714		Attrs: attrs,
43715	}
43716	op := scope.AddOperation(opspec)
43717	if scope.Err() != nil {
43718		return
43719	}
43720	var idx int
43721	var err error
43722	if batched_tensors, idx, err = makeOutputList(op, idx, "batched_tensors"); err != nil {
43723		scope.UpdateErr("Batch", err)
43724		return
43725	}
	batch_index = op.Output(idx)
	id = op.Output(idx + 1) // id is the next output slot after batch_index
43728	return batched_tensors, batch_index, id
43729}
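
// Example: a hypothetical sketch of batching a single tensor with optional
// attributes; it assumes NewScope and Const from this package, and the
// attribute values are illustrative only (note the final allowed size must
// equal max_batch_size):
//
//	s := NewScope()
//	feats := Const(s, [][]float32{{1, 2}, {3, 4}})
//	batched, batchIndex, id := Batch(s, []tf.Output{feats},
//		4,       // num_batch_threads
//		32,      // max_batch_size
//		1000000, // batch_timeout_micros
//		1000000, // grad_timeout_micros
//		BatchAllowedBatchSizes([]int64{8, 16, 32}))
//	_, _, _ = batched, batchIndex, id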
43730
43731// Writes a tensor summary.
43732//
43733// Writes `tensor` at `step` with `tag` using summary `writer`.
43734//
43735// Returns the created operation.
43736func WriteSummary(scope *Scope, writer tf.Output, step tf.Output, tensor tf.Output, tag tf.Output, summary_metadata tf.Output) (o *tf.Operation) {
43737	if scope.Err() != nil {
43738		return
43739	}
43740	opspec := tf.OpSpec{
43741		Type: "WriteSummary",
43742		Input: []tf.Input{
43743			writer, step, tensor, tag, summary_metadata,
43744		},
43745	}
43746	return scope.AddOperation(opspec)
43747}
43748
43749// UnicodeDecodeAttr is an optional argument to UnicodeDecode.
43750type UnicodeDecodeAttr func(optionalAttr)
43751
43752// UnicodeDecodeErrors sets the optional errors attribute to value.
43753//
43754// value: Error handling policy when there is invalid formatting found in the input.
// The value of 'strict' will cause the operation to produce an InvalidArgument
43756// error on any invalid input formatting. A value of 'replace' (the default) will
43757// cause the operation to replace any invalid formatting in the input with the
43758// `replacement_char` codepoint. A value of 'ignore' will cause the operation to
43759// skip any invalid formatting in the input and produce no corresponding output
43760// character.
43761// If not specified, defaults to "replace"
43762func UnicodeDecodeErrors(value string) UnicodeDecodeAttr {
43763	return func(m optionalAttr) {
43764		m["errors"] = value
43765	}
43766}
43767
43768// UnicodeDecodeReplacementChar sets the optional replacement_char attribute to value.
43769//
43770// value: The replacement character codepoint to be used in place of any invalid
// formatting in the input when `errors='replace'`. Any valid Unicode codepoint may
// be used. The default is the Unicode replacement character, U+FFFD (decimal 65533).
43774// If not specified, defaults to 65533
43775func UnicodeDecodeReplacementChar(value int64) UnicodeDecodeAttr {
43776	return func(m optionalAttr) {
43777		m["replacement_char"] = value
43778	}
43779}
43780
43781// UnicodeDecodeReplaceControlCharacters sets the optional replace_control_characters attribute to value.
43782//
43783// value: Whether to replace the C0 control characters (00-1F) with the
43784// `replacement_char`. Default is false.
43785// If not specified, defaults to false
43786func UnicodeDecodeReplaceControlCharacters(value bool) UnicodeDecodeAttr {
43787	return func(m optionalAttr) {
43788		m["replace_control_characters"] = value
43789	}
43790}
43791
43792// UnicodeDecodeTsplits sets the optional Tsplits attribute to value.
43793// If not specified, defaults to DT_INT64
43794func UnicodeDecodeTsplits(value tf.DataType) UnicodeDecodeAttr {
43795	return func(m optionalAttr) {
43796		m["Tsplits"] = value
43797	}
43798}
43799
43800// Decodes each string in `input` into a sequence of Unicode code points.
43801//
43802// The character codepoints for all strings are returned using a single vector
43803// `char_values`, with strings expanded to characters in row-major order.
43804//
43805// The `row_splits` tensor indicates where the codepoints for
43806// each input string begin and end within the `char_values` tensor.
43807// In particular, the values for the `i`th
43808// string (in row-major order) are stored in the slice
43809// `[row_splits[i]:row_splits[i+1]]`. Thus:
43810//
43811// * `char_values[row_splits[i]+j]` is the Unicode codepoint for the `j`th
43812//   character in the `i`th string (in row-major order).
43813// * `row_splits[i+1] - row_splits[i]` is the number of characters in the `i`th
43814//   string (in row-major order).
43815//
43816// Arguments:
43817//	input: The text to be decoded. Can have any shape. Note that the output is flattened
43818// to a vector of char values.
43819//	input_encoding: Text encoding of the input strings. This is any of the encodings supported
43820// by ICU ucnv algorithmic converters. Examples: `"UTF-16", "US ASCII", "UTF-8"`.
43821//
43822// Returns:
//	row_splits: A 1D tensor containing the row splits (dtype set by the `Tsplits` attribute, default int64).
43824//	char_values: A 1D int32 Tensor containing the decoded codepoints.
43825func UnicodeDecode(scope *Scope, input tf.Output, input_encoding string, optional ...UnicodeDecodeAttr) (row_splits tf.Output, char_values tf.Output) {
43826	if scope.Err() != nil {
43827		return
43828	}
43829	attrs := map[string]interface{}{"input_encoding": input_encoding}
43830	for _, a := range optional {
43831		a(attrs)
43832	}
43833	opspec := tf.OpSpec{
43834		Type: "UnicodeDecode",
43835		Input: []tf.Input{
43836			input,
43837		},
43838		Attrs: attrs,
43839	}
43840	op := scope.AddOperation(opspec)
43841	return op.Output(0), op.Output(1)
43842}
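
// Example: a hypothetical sketch of decoding UTF-8 strings into codepoints;
// it assumes NewScope and Const from this package, and the input strings are
// illustrative:
//
//	s := NewScope()
//	strs := Const(s, []string{"héllo", "wörld"})
//	rowSplits, charValues := UnicodeDecode(s, strs, "UTF-8",
//		UnicodeDecodeErrors("replace"),
//		UnicodeDecodeReplacementChar(0xFFFD))
//	_, _ = rowSplits, charValues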
43843
43844// LSTMBlockCellAttr is an optional argument to LSTMBlockCell.
43845type LSTMBlockCellAttr func(optionalAttr)
43846
43847// LSTMBlockCellForgetBias sets the optional forget_bias attribute to value.
43848//
43849// value: The forget gate bias.
43850// If not specified, defaults to 1
43851func LSTMBlockCellForgetBias(value float32) LSTMBlockCellAttr {
43852	return func(m optionalAttr) {
43853		m["forget_bias"] = value
43854	}
43855}
43856
43857// LSTMBlockCellCellClip sets the optional cell_clip attribute to value.
43858//
43859// value: Value to clip the 'cs' value to.
43860// If not specified, defaults to 3
43861func LSTMBlockCellCellClip(value float32) LSTMBlockCellAttr {
43862	return func(m optionalAttr) {
43863		m["cell_clip"] = value
43864	}
43865}
43866
43867// LSTMBlockCellUsePeephole sets the optional use_peephole attribute to value.
43868//
43869// value: Whether to use peephole weights.
43870// If not specified, defaults to false
43871func LSTMBlockCellUsePeephole(value bool) LSTMBlockCellAttr {
43872	return func(m optionalAttr) {
43873		m["use_peephole"] = value
43874	}
43875}
43876
43877// Computes the LSTM cell forward propagation for 1 time step.
43878//
43879// This implementation uses 1 weight matrix and 1 bias vector, and there's an
43880// optional peephole connection.
43881//
43882// This kernel op implements the following mathematical equations:
43883//
43884// ```python
43885// xh = [x, h_prev]
43886// [i, f, ci, o] = xh * w + b
43887// f = f + forget_bias
43888//
43889// if not use_peephole:
43890//   wci = wcf = wco = 0
43891//
43892// i = sigmoid(cs_prev * wci + i)
43893// f = sigmoid(cs_prev * wcf + f)
43894// ci = tanh(ci)
43895//
43896// cs = ci .* i + cs_prev .* f
43897// cs = clip(cs, cell_clip)
43898//
43899// o = sigmoid(cs * wco + o)
43900// co = tanh(cs)
43901// h = co .* o
43902// ```
43903//
43904// Arguments:
43905//	x: The input to the LSTM cell, shape (batch_size, num_inputs).
43906//	cs_prev: Value of the cell state at previous time step.
43907//	h_prev: Output of the previous cell at previous time step.
43908//	w: The weight matrix.
43909//	wci: The weight matrix for input gate peephole connection.
43910//	wcf: The weight matrix for forget gate peephole connection.
43911//	wco: The weight matrix for output gate peephole connection.
43912//	b: The bias vector.
43913//
43914// Returns:
43915//	i: The input gate.
43916//	cs: The cell state before the tanh.
43917//	f: The forget gate.
43918//	o: The output gate.
43919//	ci: The cell input.
43920//	co: The cell after the tanh.
43921//	h: The output h vector.
43922func LSTMBlockCell(scope *Scope, x tf.Output, cs_prev tf.Output, h_prev tf.Output, w tf.Output, wci tf.Output, wcf tf.Output, wco tf.Output, b tf.Output, optional ...LSTMBlockCellAttr) (i tf.Output, cs tf.Output, f tf.Output, o tf.Output, ci tf.Output, co tf.Output, h tf.Output) {
43923	if scope.Err() != nil {
43924		return
43925	}
43926	attrs := map[string]interface{}{}
43927	for _, a := range optional {
43928		a(attrs)
43929	}
43930	opspec := tf.OpSpec{
43931		Type: "LSTMBlockCell",
43932		Input: []tf.Input{
43933			x, cs_prev, h_prev, w, wci, wcf, wco, b,
43934		},
43935		Attrs: attrs,
43936	}
43937	op := scope.AddOperation(opspec)
43938	return op.Output(0), op.Output(1), op.Output(2), op.Output(3), op.Output(4), op.Output(5), op.Output(6)
43939}
43940
43941// Check if the input matches the regex pattern.
43942//
43943// The input is a string tensor of any shape. The pattern is a scalar
43944// string tensor which is applied to every element of the input tensor.
43945// The boolean values (True or False) of the output tensor indicate
43946// if the input matches the regex pattern provided.
43947//
43948// The pattern follows the re2 syntax (https://github.com/google/re2/wiki/Syntax)
43949//
43950// Examples:
43951//
43952// >>> tf.strings.regex_full_match(["TF lib", "lib TF"], ".*lib$")
43953// <tf.Tensor: shape=(2,), dtype=bool, numpy=array([ True, False])>
43954// >>> tf.strings.regex_full_match(["TF lib", "lib TF"], ".*TF$")
43955// <tf.Tensor: shape=(2,), dtype=bool, numpy=array([False,  True])>
43956//
43957// Arguments:
43958//	input: A string tensor of the text to be processed.
43959//	pattern: A scalar string tensor containing the regular expression to match the input.
43960//
43961// Returns A bool tensor with the same shape as `input`.
43962func RegexFullMatch(scope *Scope, input tf.Output, pattern tf.Output) (output tf.Output) {
43963	if scope.Err() != nil {
43964		return
43965	}
43966	opspec := tf.OpSpec{
43967		Type: "RegexFullMatch",
43968		Input: []tf.Input{
43969			input, pattern,
43970		},
43971	}
43972	op := scope.AddOperation(opspec)
43973	return op.Output(0)
43974}
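
// Example: a hypothetical Go counterpart to the doctest above; it assumes
// NewScope and Const from this package:
//
//	s := NewScope()
//	in := Const(s, []string{"TF lib", "lib TF"})
//	pattern := Const(s, ".*lib$")
//	m := RegexFullMatch(s, in, pattern) // evaluates to [true, false]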
43975
43976// ResourceSparseApplyAdagradV2Attr is an optional argument to ResourceSparseApplyAdagradV2.
43977type ResourceSparseApplyAdagradV2Attr func(optionalAttr)
43978
43979// ResourceSparseApplyAdagradV2UseLocking sets the optional use_locking attribute to value.
43980//
43981// value: If `True`, updating of the var and accum tensors will be protected
43982// by a lock; otherwise the behavior is undefined, but may exhibit less
43983// contention.
43984// If not specified, defaults to false
43985func ResourceSparseApplyAdagradV2UseLocking(value bool) ResourceSparseApplyAdagradV2Attr {
43986	return func(m optionalAttr) {
43987		m["use_locking"] = value
43988	}
43989}
43990
43991// ResourceSparseApplyAdagradV2UpdateSlots sets the optional update_slots attribute to value.
43992// If not specified, defaults to true
43993func ResourceSparseApplyAdagradV2UpdateSlots(value bool) ResourceSparseApplyAdagradV2Attr {
43994	return func(m optionalAttr) {
43995		m["update_slots"] = value
43996	}
43997}
43998
43999// Update relevant entries in '*var' and '*accum' according to the adagrad scheme.
44000//
// That is, for the rows for which we have grad, we update var and accum as follows:
// accum += grad * grad
// var -= lr * grad * (1 / (sqrt(accum) + epsilon))
44004//
44005// Arguments:
44006//	var_: Should be from a Variable().
44007//	accum: Should be from a Variable().
44008//	lr: Learning rate. Must be a scalar.
44009//	epsilon: Constant factor. Must be a scalar.
44010//	grad: The gradient.
44011//	indices: A vector of indices into the first dimension of var and accum.
44012//
44013// Returns the created operation.
44014func ResourceSparseApplyAdagradV2(scope *Scope, var_ tf.Output, accum tf.Output, lr tf.Output, epsilon tf.Output, grad tf.Output, indices tf.Output, optional ...ResourceSparseApplyAdagradV2Attr) (o *tf.Operation) {
44015	if scope.Err() != nil {
44016		return
44017	}
44018	attrs := map[string]interface{}{}
44019	for _, a := range optional {
44020		a(attrs)
44021	}
44022	opspec := tf.OpSpec{
44023		Type: "ResourceSparseApplyAdagradV2",
44024		Input: []tf.Input{
44025			var_, accum, lr, epsilon, grad, indices,
44026		},
44027		Attrs: attrs,
44028	}
44029	return scope.AddOperation(opspec)
44030}
44031
44032// Returns x - y element-wise.
44033//
44034// *NOTE*: `Subtract` supports broadcasting. More about broadcasting
44035// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
44036func Sub(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
44037	if scope.Err() != nil {
44038		return
44039	}
44040	opspec := tf.OpSpec{
44041		Type: "Sub",
44042		Input: []tf.Input{
44043			x, y,
44044		},
44045	}
44046	op := scope.AddOperation(opspec)
44047	return op.Output(0)
44048}
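
// Example: a hypothetical sketch of an element-wise subtraction with scalar
// broadcasting; it assumes NewScope and Const from this package:
//
//	s := NewScope()
//	x := Const(s, []float32{3, 5, 7})
//	y := Const(s, float32(1)) // scalar broadcast against x
//	z := Sub(s, x, y)         // evaluates to [2, 4, 6]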
44049
44050// Says whether the targets are in the top `K` predictions.
44051//
44052// This outputs a `batch_size` bool array, an entry `out[i]` is `true` if the
44053// prediction for the target class is among the top `k` predictions among
44054// all predictions for example `i`. Note that the behavior of `InTopK` differs
44055// from the `TopK` op in its handling of ties; if multiple classes have the
44056// same prediction value and straddle the top-`k` boundary, all of those
44057// classes are considered to be in the top `k`.
44058//
44059// More formally, let
44060//
44061//   \\(predictions_i\\) be the predictions for all classes for example `i`,
44062//   \\(targets_i\\) be the target class for example `i`,
44063//   \\(out_i\\) be the output for example `i`,
44064//
44065// $$out_i = predictions_{i, targets_i} \in TopKIncludingTies(predictions_i)$$
44066//
44067// Arguments:
44068//	predictions: A `batch_size` x `classes` tensor.
44069//	targets: A `batch_size` vector of class ids.
44070//	k: Number of top elements to look at for computing precision.
44071//
// Returns Computed precision at `k` as a `bool Tensor`.
44073func InTopK(scope *Scope, predictions tf.Output, targets tf.Output, k int64) (precision tf.Output) {
44074	if scope.Err() != nil {
44075		return
44076	}
44077	attrs := map[string]interface{}{"k": k}
44078	opspec := tf.OpSpec{
44079		Type: "InTopK",
44080		Input: []tf.Input{
44081			predictions, targets,
44082		},
44083		Attrs: attrs,
44084	}
44085	op := scope.AddOperation(opspec)
44086	return op.Output(0)
44087}
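
// Example: a hypothetical sketch with a batch of two examples and k=1; it
// assumes NewScope and Const from this package:
//
//	s := NewScope()
//	preds := Const(s, [][]float32{{0.1, 0.8, 0.1}, {0.4, 0.3, 0.3}})
//	targets := Const(s, []int32{1, 2})
//	p := InTopK(s, preds, targets, 1) // evaluates to [true, false]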
44088
44089// A TPU core selector Op.
44090//
44091// This Op produces a set of TPU cores (for warm-up) or a single TPU core
44092// (for regular inference) to execute the TPU program on. The output is
44093// consumed by TPUPartitionedCall.
44094//
// Returns A vector of 1 or more TPU cores.
44096func TPUOrdinalSelector(scope *Scope) (device_ordinals tf.Output) {
44097	if scope.Err() != nil {
44098		return
44099	}
44100	opspec := tf.OpSpec{
44101		Type: "TPUOrdinalSelector",
44102	}
44103	op := scope.AddOperation(opspec)
44104	return op.Output(0)
44105}
44106
44107// SubstrAttr is an optional argument to Substr.
44108type SubstrAttr func(optionalAttr)
44109
44110// SubstrUnit sets the optional unit attribute to value.
44111//
44112// value: The unit that is used to create the substring.  One of: `"BYTE"` (for
44113// defining position and length by bytes) or `"UTF8_CHAR"` (for the UTF-8
44114// encoded Unicode code points).  The default is `"BYTE"`. Results are undefined if
44115// `unit=UTF8_CHAR` and the `input` strings do not contain structurally valid
44116// UTF-8.
44117// If not specified, defaults to "BYTE"
44118func SubstrUnit(value string) SubstrAttr {
44119	return func(m optionalAttr) {
44120		m["unit"] = value
44121	}
44122}
44123
44124// Return substrings from `Tensor` of strings.
44125//
44126// For each string in the input `Tensor`, creates a substring starting at index
44127// `pos` with a total length of `len`.
44128//
44129// If `len` defines a substring that would extend beyond the length of the input
44130// string, or if `len` is negative, then as many characters as possible are used.
44131//
44132// A negative `pos` indicates distance within the string backwards from the end.
44133//
44134// If `pos` specifies an index which is out of range for any of the input strings,
44135// then an `InvalidArgumentError` is thrown.
44136//
44137// `pos` and `len` must have the same shape, otherwise a `ValueError` is thrown on
44138// Op creation.
44139//
44140// *NOTE*: `Substr` supports broadcasting up to two dimensions. More about
44141// broadcasting
44142// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
44143//
44144// ---
44145//
44146// Examples
44147//
44148// Using scalar `pos` and `len`:
44149//
44150// ```python
44151// input = [b'Hello', b'World']
44152// position = 1
44153// length = 3
44154//
44155// output = [b'ell', b'orl']
44156// ```
44157//
44158// Using `pos` and `len` with same shape as `input`:
44159//
44160// ```python
44161// input = [[b'ten', b'eleven', b'twelve'],
44162//          [b'thirteen', b'fourteen', b'fifteen'],
44163//          [b'sixteen', b'seventeen', b'eighteen']]
44164// position = [[1, 2, 3],
44165//             [1, 2, 3],
44166//             [1, 2, 3]]
44167// length =   [[2, 3, 4],
44168//             [4, 3, 2],
44169//             [5, 5, 5]]
44170//
44171// output = [[b'en', b'eve', b'lve'],
44172//           [b'hirt', b'urt', b'te'],
44173//           [b'ixtee', b'vente', b'hteen']]
44174// ```
44175//
44176// Broadcasting `pos` and `len` onto `input`:
44177//
// ```python
44179// input = [[b'ten', b'eleven', b'twelve'],
44180//          [b'thirteen', b'fourteen', b'fifteen'],
44181//          [b'sixteen', b'seventeen', b'eighteen'],
44182//          [b'nineteen', b'twenty', b'twentyone']]
44183// position = [1, 2, 3]
44184// length =   [1, 2, 3]
44185//
44186// output = [[b'e', b'ev', b'lve'],
44187//           [b'h', b'ur', b'tee'],
44188//           [b'i', b've', b'hte'],
44189//           [b'i', b'en', b'nty']]
44190// ```
44191//
44192// Broadcasting `input` onto `pos` and `len`:
44193//
// ```python
44195// input = b'thirteen'
44196// position = [1, 5, 7]
44197// length =   [3, 2, 1]
44198//
44199// output = [b'hir', b'ee', b'n']
44200// ```
44201//
44202// Raises:
44203//
44204//   * `ValueError`: If the first argument cannot be converted to a
44205//      Tensor of `dtype string`.
44206//   * `InvalidArgumentError`: If indices are out of range.
44207//   * `ValueError`: If `pos` and `len` are not the same shape.
44208//
44209//
44210// Arguments:
44211//	input: Tensor of strings
44212//	pos: Scalar defining the position of first character in each substring
44213//	len: Scalar defining the number of characters to include in each substring
44214//
44215// Returns Tensor of substrings
44216func Substr(scope *Scope, input tf.Output, pos tf.Output, len tf.Output, optional ...SubstrAttr) (output tf.Output) {
44217	if scope.Err() != nil {
44218		return
44219	}
44220	attrs := map[string]interface{}{}
44221	for _, a := range optional {
44222		a(attrs)
44223	}
44224	opspec := tf.OpSpec{
44225		Type: "Substr",
44226		Input: []tf.Input{
44227			input, pos, len,
44228		},
44229		Attrs: attrs,
44230	}
44231	op := scope.AddOperation(opspec)
44232	return op.Output(0)
44233}
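
// Example: a hypothetical sketch of the scalar pos/len case from the docs;
// it assumes NewScope and Const from this package:
//
//	s := NewScope()
//	in := Const(s, []string{"Hello", "World"})
//	pos := Const(s, int32(1))
//	length := Const(s, int32(3))
//	out := Substr(s, in, pos, length, SubstrUnit("BYTE")) // ["ell", "orl"]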
44234
44235// Elementwise computes the bitwise AND of `x` and `y`.
44236//
// The result will have those bits set that are set in both `x` and `y`. The
44238// computation is performed on the underlying representations of `x` and `y`.
44239//
44240// For example:
44241//
44242// ```python
44243// import tensorflow as tf
44244// from tensorflow.python.ops import bitwise_ops
44245// dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64,
44246//               tf.uint8, tf.uint16, tf.uint32, tf.uint64]
44247//
44248// for dtype in dtype_list:
44249//   lhs = tf.constant([0, 5, 3, 14], dtype=dtype)
44250//   rhs = tf.constant([5, 0, 7, 11], dtype=dtype)
44251//   exp = tf.constant([0, 0, 3, 10], dtype=tf.float32)
44252//
44253//   res = bitwise_ops.bitwise_and(lhs, rhs)
44254//   tf.assert_equal(tf.cast(res, tf.float32), exp) # TRUE
44255// ```
44256//
44257func BitwiseAnd(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
44258	if scope.Err() != nil {
44259		return
44260	}
44261	opspec := tf.OpSpec{
44262		Type: "BitwiseAnd",
44263		Input: []tf.Input{
44264			x, y,
44265		},
44266	}
44267	op := scope.AddOperation(opspec)
44268	return op.Output(0)
44269}
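
// Example: a hypothetical Go counterpart to the Python example above; it
// assumes NewScope and Const from this package:
//
//	s := NewScope()
//	lhs := Const(s, []int32{0, 5, 3, 14})
//	rhs := Const(s, []int32{5, 0, 7, 11})
//	z := BitwiseAnd(s, lhs, rhs) // evaluates to [0, 0, 3, 10]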
44270
44271// SerializeIteratorAttr is an optional argument to SerializeIterator.
44272type SerializeIteratorAttr func(optionalAttr)
44273
44274// SerializeIteratorExternalStatePolicy sets the optional external_state_policy attribute to value.
44275// If not specified, defaults to 0
44276func SerializeIteratorExternalStatePolicy(value int64) SerializeIteratorAttr {
44277	return func(m optionalAttr) {
44278		m["external_state_policy"] = value
44279	}
44280}
44281
44282// Converts the given `resource_handle` representing an iterator to a variant tensor.
44283//
44284// Arguments:
44285//	resource_handle: A handle to an iterator resource.
44286//
44287// Returns A variant tensor storing the state of the iterator contained in the
44288// resource.
44289func SerializeIterator(scope *Scope, resource_handle tf.Output, optional ...SerializeIteratorAttr) (serialized tf.Output) {
44290	if scope.Err() != nil {
44291		return
44292	}
44293	attrs := map[string]interface{}{}
44294	for _, a := range optional {
44295		a(attrs)
44296	}
44297	opspec := tf.OpSpec{
44298		Type: "SerializeIterator",
44299		Input: []tf.Input{
44300			resource_handle,
44301		},
44302		Attrs: attrs,
44303	}
44304	op := scope.AddOperation(opspec)
44305	return op.Output(0)
44306}
44307
44308// ResourceApplyCenteredRMSPropAttr is an optional argument to ResourceApplyCenteredRMSProp.
44309type ResourceApplyCenteredRMSPropAttr func(optionalAttr)
44310
44311// ResourceApplyCenteredRMSPropUseLocking sets the optional use_locking attribute to value.
44312//
44313// value: If `True`, updating of the var, mg, ms, and mom tensors is
44314// protected by a lock; otherwise the behavior is undefined, but may exhibit less
44315// contention.
44316// If not specified, defaults to false
44317func ResourceApplyCenteredRMSPropUseLocking(value bool) ResourceApplyCenteredRMSPropAttr {
44318	return func(m optionalAttr) {
44319		m["use_locking"] = value
44320	}
44321}
44322
44323// Update '*var' according to the centered RMSProp algorithm.
44324//
44325// The centered RMSProp algorithm uses an estimate of the centered second moment
44326// (i.e., the variance) for normalization, as opposed to regular RMSProp, which
44327// uses the (uncentered) second moment. This often helps with training, but is
44328// slightly more expensive in terms of computation and memory.
44329//
// Note that in the dense implementation of this algorithm (which this op
// implements), mg, ms, and mom will update even if the grad is zero; in the
// sparse implementation, mg, ms, and mom will not update in iterations during
// which the grad is zero.
44333//
44334// mean_square = decay * mean_square + (1-decay) * gradient ** 2
44335// mean_grad = decay * mean_grad + (1-decay) * gradient
44336//
44337// Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2)
44338//
44339// mg <- rho * mg_{t-1} + (1-rho) * grad
44340// ms <- rho * ms_{t-1} + (1-rho) * grad * grad
44341// mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms - mg * mg + epsilon)
44342// var <- var - mom
44343//
44344// Arguments:
44345//	var_: Should be from a Variable().
44346//	mg: Should be from a Variable().
44347//	ms: Should be from a Variable().
44348//	mom: Should be from a Variable().
44349//	lr: Scaling factor. Must be a scalar.
44350//	rho: Decay rate. Must be a scalar.
44351//	momentum: Momentum Scale. Must be a scalar.
44352//	epsilon: Ridge term. Must be a scalar.
44353//	grad: The gradient.
44354//
44355// Returns the created operation.
44356func ResourceApplyCenteredRMSProp(scope *Scope, var_ tf.Output, mg tf.Output, ms tf.Output, mom tf.Output, lr tf.Output, rho tf.Output, momentum tf.Output, epsilon tf.Output, grad tf.Output, optional ...ResourceApplyCenteredRMSPropAttr) (o *tf.Operation) {
44357	if scope.Err() != nil {
44358		return
44359	}
44360	attrs := map[string]interface{}{}
44361	for _, a := range optional {
44362		a(attrs)
44363	}
44364	opspec := tf.OpSpec{
44365		Type: "ResourceApplyCenteredRMSProp",
44366		Input: []tf.Input{
44367			var_, mg, ms, mom, lr, rho, momentum, epsilon, grad,
44368		},
44369		Attrs: attrs,
44370	}
44371	return scope.AddOperation(opspec)
44372}
44373
44374// UnsortedSegmentJoinAttr is an optional argument to UnsortedSegmentJoin.
44375type UnsortedSegmentJoinAttr func(optionalAttr)
44376
44377// UnsortedSegmentJoinSeparator sets the optional separator attribute to value.
44378//
44379// value: The separator to use when joining.
44380// If not specified, defaults to ""
44381func UnsortedSegmentJoinSeparator(value string) UnsortedSegmentJoinAttr {
44382	return func(m optionalAttr) {
44383		m["separator"] = value
44384	}
44385}
44386
44387// Joins the elements of `inputs` based on `segment_ids`.
44388//
44389// Computes the string join along segments of a tensor.
44390// Given `segment_ids` with rank `N` and `data` with rank `N+M`:
44391//
//     `output[i, k1...kM] = strings.join([data[j1...jN, k1...kM]])`
44393//
44394// where the join is over all [j1...jN] such that segment_ids[j1...jN] = i.
44395// Strings are joined in row-major order.
44396//
44397// For example:
44398//
44399// ```python
44400// inputs = [['Y', 'q', 'c'], ['Y', '6', '6'], ['p', 'G', 'a']]
44401// output_array = string_ops.unsorted_segment_join(inputs=inputs,
44402//                                                 segment_ids=[1, 0, 1],
44403//                                                 num_segments=2,
//                                                 separator=':')
44405// # output_array ==> [['Y', '6', '6'], ['Y:p', 'q:G', 'c:a']]
44406//
44407//
44408// inputs = ['this', 'is', 'a', 'test']
44409// output_array = string_ops.unsorted_segment_join(inputs=inputs,
44410//                                                 segment_ids=[0, 0, 0, 0],
44411//                                                 num_segments=1,
//                                                 separator=':')
44413// # output_array ==> ['this:is:a:test']
44414// ```
44415//
44416// Arguments:
44417//	inputs: The input to be joined.
44418//	segment_ids: A tensor whose shape is a prefix of data.shape.  Negative segment ids are not
44419// supported.
44420//	num_segments: A scalar.
44421func UnsortedSegmentJoin(scope *Scope, inputs tf.Output, segment_ids tf.Output, num_segments tf.Output, optional ...UnsortedSegmentJoinAttr) (output tf.Output) {
44422	if scope.Err() != nil {
44423		return
44424	}
44425	attrs := map[string]interface{}{}
44426	for _, a := range optional {
44427		a(attrs)
44428	}
44429	opspec := tf.OpSpec{
44430		Type: "UnsortedSegmentJoin",
44431		Input: []tf.Input{
44432			inputs, segment_ids, num_segments,
44433		},
44434		Attrs: attrs,
44435	}
44436	op := scope.AddOperation(opspec)
44437	return op.Output(0)
44438}
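
// Example: a hypothetical sketch of the rank-1 case from the docs; it
// assumes NewScope and Const from this package:
//
//	s := NewScope()
//	in := Const(s, []string{"this", "is", "a", "test"})
//	segIDs := Const(s, []int32{0, 0, 0, 0})
//	n := Const(s, int32(1))
//	out := UnsortedSegmentJoin(s, in, segIDs, n,
//		UnsortedSegmentJoinSeparator(":")) // evaluates to ["this:is:a:test"]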
44439
// Outputs deterministic pseudorandom values from a Poisson distribution.
44443//
44444// The outputs are a deterministic function of `shape`, `seed`, and `lam`.
44445//
44446// Arguments:
44447//	shape: The shape of the output tensor.
44448//	seed: 2 seeds (shape [2]).
44449//	lam: The rate of the Poisson distribution. Shape must match the rightmost dimensions
44450// of `shape`.
44451//	dtype: The type of the output.
44452//
44453// Returns Random values with specified shape.
44454func StatelessRandomPoisson(scope *Scope, shape tf.Output, seed tf.Output, lam tf.Output, dtype tf.DataType) (output tf.Output) {
44455	if scope.Err() != nil {
44456		return
44457	}
44458	attrs := map[string]interface{}{"dtype": dtype}
44459	opspec := tf.OpSpec{
44460		Type: "StatelessRandomPoisson",
44461		Input: []tf.Input{
44462			shape, seed, lam,
44463		},
44464		Attrs: attrs,
44465	}
44466	op := scope.AddOperation(opspec)
44467	return op.Output(0)
44468}
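
// Example: a hypothetical sketch drawing three deterministic Poisson samples;
// it assumes NewScope and Const from this package, and the seed/rate values
// are illustrative:
//
//	s := NewScope()
//	shape := Const(s, []int32{3})
//	seed := Const(s, []int64{7, 11}) // 2 seeds, shape [2]
//	lam := Const(s, []float32{1, 5, 10})
//	out := StatelessRandomPoisson(s, shape, seed, lam, tf.Float)
//	_ = out // re-running with the same inputs yields the same values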
44469
44470// Returns the truth value of `NOT x` element-wise.
44471//
44472// Arguments:
44473//	x: A `Tensor` of type `bool`.
44474//
44475// Returns A `Tensor` of type `bool` with the same shape as `x`. The logical negation of `x`.
44476func LogicalNot(scope *Scope, x tf.Output) (y tf.Output) {
44477	if scope.Err() != nil {
44478		return
44479	}
44480	opspec := tf.OpSpec{
44481		Type: "LogicalNot",
44482		Input: []tf.Input{
44483			x,
44484		},
44485	}
44486	op := scope.AddOperation(opspec)
44487	return op.Output(0)
44488}
44489
44490// ImageProjectiveTransformV2Attr is an optional argument to ImageProjectiveTransformV2.
44491type ImageProjectiveTransformV2Attr func(optionalAttr)
44492
44493// ImageProjectiveTransformV2FillMode sets the optional fill_mode attribute to value.
44494//
44495// value: Fill mode, "REFLECT", "WRAP", or "CONSTANT".
44496// If not specified, defaults to "CONSTANT"
44497func ImageProjectiveTransformV2FillMode(value string) ImageProjectiveTransformV2Attr {
44498	return func(m optionalAttr) {
44499		m["fill_mode"] = value
44500	}
44501}
44502
44503// Applies the given transform to each of the images.
44504//
44505// If one row of `transforms` is `[a0, a1, a2, b0, b1, b2, c0, c1]`, then it maps
44506// the *output* point `(x, y)` to a transformed *input* point
44507// `(x', y') = ((a0 x + a1 y + a2) / k, (b0 x + b1 y + b2) / k)`, where
// `k = c0 x + c1 y + 1`. If the transformed point lies outside of the input
44509// image, the output pixel is set to 0.
44510//
44511// Arguments:
44512//	images: 4-D with shape `[batch, height, width, channels]`.
44513//	transforms: 2-D Tensor, `[batch, 8]` or `[1, 8]` matrix, where each row corresponds to a 3 x 3
44514// projective transformation matrix, with the last entry assumed to be 1. If there
44515// is one row, the same transformation will be applied to all images.
44516//	output_shape: 1-D Tensor [new_height, new_width].
44517//	interpolation: Interpolation method, "NEAREST" or "BILINEAR".
44518//
44519// Returns 4-D with shape
44520// `[batch, new_height, new_width, channels]`.
44521func ImageProjectiveTransformV2(scope *Scope, images tf.Output, transforms tf.Output, output_shape tf.Output, interpolation string, optional ...ImageProjectiveTransformV2Attr) (transformed_images tf.Output) {
44522	if scope.Err() != nil {
44523		return
44524	}
44525	attrs := map[string]interface{}{"interpolation": interpolation}
44526	for _, a := range optional {
44527		a(attrs)
44528	}
44529	opspec := tf.OpSpec{
44530		Type: "ImageProjectiveTransformV2",
44531		Input: []tf.Input{
44532			images, transforms, output_shape,
44533		},
44534		Attrs: attrs,
44535	}
44536	op := scope.AddOperation(opspec)
44537	return op.Output(0)
44538}
44539
44540// The gradient of SparseFillEmptyRows.
44541//
44542// Takes vectors reverse_index_map, shaped `[N]`, and grad_values,
44543// shaped `[N_full]`, where `N_full >= N` and copies data into either
44544// `d_values` or `d_default_value`.  Here `d_values` is shaped `[N]` and
44545// `d_default_value` is a scalar.
44546//
44547//   d_values[j] = grad_values[reverse_index_map[j]]
44548//   d_default_value = sum_{k : 0 .. N_full - 1} (
44549//      grad_values[k] * 1{k not in reverse_index_map})
44550//
44551// Arguments:
44552//	reverse_index_map: 1-D.  The reverse index map from SparseFillEmptyRows.
44553//	grad_values: 1-D.  The gradients from backprop.
44554//
44555// Returns:
44556//	d_values: 1-D.  The backprop into values.
44557//	d_default_value: 0-D.  The backprop into default_value.
44558func SparseFillEmptyRowsGrad(scope *Scope, reverse_index_map tf.Output, grad_values tf.Output) (d_values tf.Output, d_default_value tf.Output) {
44559	if scope.Err() != nil {
44560		return
44561	}
44562	opspec := tf.OpSpec{
44563		Type: "SparseFillEmptyRowsGrad",
44564		Input: []tf.Input{
44565			reverse_index_map, grad_values,
44566		},
44567	}
44568	op := scope.AddOperation(opspec)
44569	return op.Output(0), op.Output(1)
44570}
44571
44572// Reshapes a SparseTensor to represent values in a new dense shape.
44573//
44574// This operation has the same semantics as reshape on the represented dense
44575// tensor.  The `input_indices` are recomputed based on the requested `new_shape`.
44576//
44577// If one component of `new_shape` is the special value -1, the size of that
44578// dimension is computed so that the total dense size remains constant.  At
44579// most one component of `new_shape` can be -1.  The number of dense elements
44580// implied by `new_shape` must be the same as the number of dense elements
44581// originally implied by `input_shape`.
44582//
44583// Reshaping does not affect the order of values in the SparseTensor.
44584//
44585// If the input tensor has rank `R_in` and `N` non-empty values, and `new_shape`
44586// has length `R_out`, then `input_indices` has shape `[N, R_in]`,
44587// `input_shape` has length `R_in`, `output_indices` has shape `[N, R_out]`, and
44588// `output_shape` has length `R_out`.
44589//
44590// Arguments:
44591//	input_indices: 2-D.  `N x R_in` matrix with the indices of non-empty values in a
44592// SparseTensor.
44593//	input_shape: 1-D.  `R_in` vector with the input SparseTensor's dense shape.
44594//	new_shape: 1-D.  `R_out` vector with the requested new dense shape.
44595//
44596// Returns:
44597//	output_indices: 2-D.  `N x R_out` matrix with the updated indices of non-empty
44598// values in the output SparseTensor.
44599//	output_shape: 1-D.  `R_out` vector with the full dense shape of the output
44600// SparseTensor.  This is the same as `new_shape` but with any -1 dimensions
44601// filled in.
44602func SparseReshape(scope *Scope, input_indices tf.Output, input_shape tf.Output, new_shape tf.Output) (output_indices tf.Output, output_shape tf.Output) {
44603	if scope.Err() != nil {
44604		return
44605	}
44606	opspec := tf.OpSpec{
44607		Type: "SparseReshape",
44608		Input: []tf.Input{
44609			input_indices, input_shape, new_shape,
44610		},
44611	}
44612	op := scope.AddOperation(opspec)
44613	return op.Output(0), op.Output(1)
44614}
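
// Example: a hypothetical sketch reshaping a 2x3 sparse tensor to 3x2 using
// a -1 wildcard; it assumes NewScope and Const from this package:
//
//	s := NewScope()
//	indices := Const(s, [][]int64{{0, 0}, {1, 2}})
//	shape := Const(s, []int64{2, 3})
//	newShape := Const(s, []int64{3, -1}) // -1 is inferred as 2
//	outIndices, outShape := SparseReshape(s, indices, shape, newShape)
//	_, _ = outIndices, outShape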
44615
44616// Says whether the targets are in the top `K` predictions.
44617//
44618// This outputs a `batch_size` bool array, an entry `out[i]` is `true` if the
44619// prediction for the target class is among the top `k` predictions among
44620// all predictions for example `i`. Note that the behavior of `InTopK` differs
44621// from the `TopK` op in its handling of ties; if multiple classes have the
44622// same prediction value and straddle the top-`k` boundary, all of those
44623// classes are considered to be in the top `k`.
44624//
44625// More formally, let
44626//
44627//   \\(predictions_i\\) be the predictions for all classes for example `i`,
44628//   \\(targets_i\\) be the target class for example `i`,
44629//   \\(out_i\\) be the output for example `i`,
44630//
44631// $$out_i = predictions_{i, targets_i} \in TopKIncludingTies(predictions_i)$$
44632//
44633// Arguments:
44634//	predictions: A `batch_size` x `classes` tensor.
44635//	targets: A `batch_size` vector of class ids.
44636//	k: Number of top elements to look at for computing precision.
44637//
44638// Returns Computed precision at `k` as a `bool Tensor`.
44639func InTopKV2(scope *Scope, predictions tf.Output, targets tf.Output, k tf.Output) (precision tf.Output) {
44640	if scope.Err() != nil {
44641		return
44642	}
44643	opspec := tf.OpSpec{
44644		Type: "InTopKV2",
44645		Input: []tf.Input{
44646			predictions, targets, k,
44647		},
44648	}
44649	op := scope.AddOperation(opspec)
44650	return op.Output(0)
44651}
44652
44653// Creates an Optional variant with no value.
44654func OptionalNone(scope *Scope) (optional tf.Output) {
44655	if scope.Err() != nil {
44656		return
44657	}
44658	opspec := tf.OpSpec{
44659		Type: "OptionalNone",
44660	}
44661	op := scope.AddOperation(opspec)
44662	return op.Output(0)
44663}
44664
44665// RetrieveTPUEmbeddingStochasticGradientDescentParametersAttr is an optional argument to RetrieveTPUEmbeddingStochasticGradientDescentParameters.
44666type RetrieveTPUEmbeddingStochasticGradientDescentParametersAttr func(optionalAttr)
44667
44668// RetrieveTPUEmbeddingStochasticGradientDescentParametersTableId sets the optional table_id attribute to value.
44669// If not specified, defaults to -1
44670func RetrieveTPUEmbeddingStochasticGradientDescentParametersTableId(value int64) RetrieveTPUEmbeddingStochasticGradientDescentParametersAttr {
44671	return func(m optionalAttr) {
44672		m["table_id"] = value
44673	}
44674}
44675
44676// RetrieveTPUEmbeddingStochasticGradientDescentParametersTableName sets the optional table_name attribute to value.
44677// If not specified, defaults to ""
44678func RetrieveTPUEmbeddingStochasticGradientDescentParametersTableName(value string) RetrieveTPUEmbeddingStochasticGradientDescentParametersAttr {
44679	return func(m optionalAttr) {
44680		m["table_name"] = value
44681	}
44682}
44683
44684// RetrieveTPUEmbeddingStochasticGradientDescentParametersConfig sets the optional config attribute to value.
44685// If not specified, defaults to ""
44686func RetrieveTPUEmbeddingStochasticGradientDescentParametersConfig(value string) RetrieveTPUEmbeddingStochasticGradientDescentParametersAttr {
44687	return func(m optionalAttr) {
44688		m["config"] = value
44689	}
44690}
44691
44692// Retrieve SGD embedding parameters.
44693//
44694// An op that retrieves optimization parameters from embedding to host
44695// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
44696// the correct embedding table configuration. For example, this op is
44697// used to retrieve updated parameters before saving a checkpoint.
44698//
// Returns the parameters updated by the stochastic gradient descent optimization algorithm.
44700func RetrieveTPUEmbeddingStochasticGradientDescentParameters(scope *Scope, num_shards int64, shard_id int64, optional ...RetrieveTPUEmbeddingStochasticGradientDescentParametersAttr) (parameters tf.Output) {
44701	if scope.Err() != nil {
44702		return
44703	}
44704	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
44705	for _, a := range optional {
44706		a(attrs)
44707	}
44708	opspec := tf.OpSpec{
44709		Type: "RetrieveTPUEmbeddingStochasticGradientDescentParameters",
44710
44711		Attrs: attrs,
44712	}
44713	op := scope.AddOperation(opspec)
44714	return op.Output(0)
44715}
44716
44717// Deprecated. Use TensorArraySplitV3
44718//
44719// DEPRECATED at GraphDef version 26: Use TensorArraySplitV3
44720func TensorArraySplitV2(scope *Scope, handle tf.Output, value tf.Output, lengths tf.Output, flow_in tf.Output) (flow_out tf.Output) {
44721	if scope.Err() != nil {
44722		return
44723	}
44724	opspec := tf.OpSpec{
44725		Type: "TensorArraySplitV2",
44726		Input: []tf.Input{
44727			handle, value, lengths, flow_in,
44728		},
44729	}
44730	op := scope.AddOperation(opspec)
44731	return op.Output(0)
44732}
44733
44734// UpperBoundAttr is an optional argument to UpperBound.
44735type UpperBoundAttr func(optionalAttr)
44736
44737// UpperBoundOutType sets the optional out_type attribute to value.
44738// If not specified, defaults to DT_INT32
44739func UpperBoundOutType(value tf.DataType) UpperBoundAttr {
44740	return func(m optionalAttr) {
44741		m["out_type"] = value
44742	}
44743}
44744
// Applies upper_bound(sorted_inputs, values) along each row.
44746//
44747// Each set of rows with the same index in (sorted_inputs, values) is treated
44748// independently.  The resulting row is the equivalent of calling
44749// `np.searchsorted(sorted_inputs, values, side='right')`.
44750//
44751// The result is not a global index to the entire
44752// `Tensor`, but rather just the index in the last dimension.
44753//
44754// A 2-D example:
44755//   sorted_sequence = [[0, 3, 9, 9, 10],
44756//                      [1, 2, 3, 4, 5]]
44757//   values = [[2, 4, 9],
44758//             [0, 2, 6]]
44759//
44760//   result = UpperBound(sorted_sequence, values)
44761//
44762//   result == [[1, 2, 4],
44763//              [0, 2, 5]]
44764//
44765// Arguments:
44766//	sorted_inputs: 2-D Tensor where each row is ordered.
//	values: 2-D Tensor with the same number of rows as `sorted_inputs`. Contains
// the values that will be searched for in `sorted_inputs`.
44769//
44770// Returns A `Tensor` with the same shape as `values`.  It contains the last scalar index
44771// into the last dimension where values can be inserted without changing the
44772// ordered property.
44773func UpperBound(scope *Scope, sorted_inputs tf.Output, values tf.Output, optional ...UpperBoundAttr) (output tf.Output) {
44774	if scope.Err() != nil {
44775		return
44776	}
44777	attrs := map[string]interface{}{}
44778	for _, a := range optional {
44779		a(attrs)
44780	}
44781	opspec := tf.OpSpec{
44782		Type: "UpperBound",
44783		Input: []tf.Input{
44784			sorted_inputs, values,
44785		},
44786		Attrs: attrs,
44787	}
44788	op := scope.AddOperation(opspec)
44789	return op.Output(0)
44790}
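
// Example: a hypothetical sketch of the 2-D example from the docs; it
// assumes NewScope and Const from this package:
//
//	s := NewScope()
//	sortedInputs := Const(s, [][]int32{{0, 3, 9, 9, 10}, {1, 2, 3, 4, 5}})
//	values := Const(s, [][]int32{{2, 4, 9}, {0, 2, 6}})
//	idx := UpperBound(s, sortedInputs, values) // [[1, 2, 4], [0, 2, 5]]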
44791
44792// ResourceApplyFtrlV2Attr is an optional argument to ResourceApplyFtrlV2.
44793type ResourceApplyFtrlV2Attr func(optionalAttr)
44794
44795// ResourceApplyFtrlV2UseLocking sets the optional use_locking attribute to value.
44796//
44797// value: If `True`, updating of the var and accum tensors will be protected
44798// by a lock; otherwise the behavior is undefined, but may exhibit less
44799// contention.
44800// If not specified, defaults to false
44801func ResourceApplyFtrlV2UseLocking(value bool) ResourceApplyFtrlV2Attr {
44802	return func(m optionalAttr) {
44803		m["use_locking"] = value
44804	}
44805}
44806
44807// ResourceApplyFtrlV2MultiplyLinearByLr sets the optional multiply_linear_by_lr attribute to value.
44808// If not specified, defaults to false
44809func ResourceApplyFtrlV2MultiplyLinearByLr(value bool) ResourceApplyFtrlV2Attr {
44810	return func(m optionalAttr) {
44811		m["multiply_linear_by_lr"] = value
44812	}
44813}
44814
44815// Update '*var' according to the Ftrl-proximal scheme.
44816//
44817// grad_with_shrinkage = grad + 2 * l2_shrinkage * var
44818// accum_new = accum + grad_with_shrinkage * grad_with_shrinkage
44819// linear += grad_with_shrinkage +
44820//     (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
44821// quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
44822// var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
44823// accum = accum_new
44824//
44825// Arguments:
44826//	var_: Should be from a Variable().
44827//	accum: Should be from a Variable().
44828//	linear: Should be from a Variable().
44829//	grad: The gradient.
44830//	lr: Scaling factor. Must be a scalar.
44831//	l1: L1 regularization. Must be a scalar.
//	l2: L2 regularization. Must be a scalar.
//	l2_shrinkage: L2 shrinkage regularization. Must be a scalar.
//	lr_power: Scaling factor. Must be a scalar.
44835//
44836// Returns the created operation.
44837func ResourceApplyFtrlV2(scope *Scope, var_ tf.Output, accum tf.Output, linear tf.Output, grad tf.Output, lr tf.Output, l1 tf.Output, l2 tf.Output, l2_shrinkage tf.Output, lr_power tf.Output, optional ...ResourceApplyFtrlV2Attr) (o *tf.Operation) {
44838	if scope.Err() != nil {
44839		return
44840	}
44841	attrs := map[string]interface{}{}
44842	for _, a := range optional {
44843		a(attrs)
44844	}
44845	opspec := tf.OpSpec{
44846		Type: "ResourceApplyFtrlV2",
44847		Input: []tf.Input{
44848			var_, accum, linear, grad, lr, l1, l2, l2_shrinkage, lr_power,
44849		},
44850		Attrs: attrs,
44851	}
44852	return scope.AddOperation(opspec)
44853}
44854
44855// TensorArrayConcatV2Attr is an optional argument to TensorArrayConcatV2.
44856type TensorArrayConcatV2Attr func(optionalAttr)
44857
44858// TensorArrayConcatV2ElementShapeExcept0 sets the optional element_shape_except0 attribute to value.
// If not specified, defaults to an unknown shape (unknown_rank: true).
44860func TensorArrayConcatV2ElementShapeExcept0(value tf.Shape) TensorArrayConcatV2Attr {
44861	return func(m optionalAttr) {
44862		m["element_shape_except0"] = value
44863	}
44864}
44865
44866// Deprecated. Use TensorArrayConcatV3
44867func TensorArrayConcatV2(scope *Scope, handle tf.Output, flow_in tf.Output, dtype tf.DataType, optional ...TensorArrayConcatV2Attr) (value tf.Output, lengths tf.Output) {
44868	if scope.Err() != nil {
44869		return
44870	}
44871	attrs := map[string]interface{}{"dtype": dtype}
44872	for _, a := range optional {
44873		a(attrs)
44874	}
44875	opspec := tf.OpSpec{
44876		Type: "TensorArrayConcatV2",
44877		Input: []tf.Input{
44878			handle, flow_in,
44879		},
44880		Attrs: attrs,
44881	}
44882	op := scope.AddOperation(opspec)
44883	return op.Output(0), op.Output(1)
44884}
44885
// Writes contents to the file at input filename.
//
// Creates the file, and recursively creates the directory, if they do not exist.
44889//
44890// Arguments:
44891//	filename: scalar. The name of the file to which we write the contents.
44892//	contents: scalar. The content to be written to the output file.
44893//
44894// Returns the created operation.
44895func WriteFile(scope *Scope, filename tf.Output, contents tf.Output) (o *tf.Operation) {
44896	if scope.Err() != nil {
44897		return
44898	}
44899	opspec := tf.OpSpec{
44900		Type: "WriteFile",
44901		Input: []tf.Input{
44902			filename, contents,
44903		},
44904	}
44905	return scope.AddOperation(opspec)
44906}
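
// Example: a hypothetical sketch of writing a scalar string to disk; it
// assumes NewScope and Const from this package, and the path is illustrative.
// The returned *tf.Operation still has to be run in a session for the write
// to happen:
//
//	s := NewScope()
//	filename := Const(s, "/tmp/example.txt")
//	contents := Const(s, "hello, world")
//	writeOp := WriteFile(s, filename, contents)
//	_ = writeOp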
44907
44908// MatrixSolveAttr is an optional argument to MatrixSolve.
44909type MatrixSolveAttr func(optionalAttr)
44910
44911// MatrixSolveAdjoint sets the optional adjoint attribute to value.
44912//
44913// value: Boolean indicating whether to solve with `matrix` or its (block-wise)
44914// adjoint.
44915// If not specified, defaults to false
44916func MatrixSolveAdjoint(value bool) MatrixSolveAttr {
44917	return func(m optionalAttr) {
44918		m["adjoint"] = value
44919	}
44920}
44921
44922// Solves systems of linear equations.
44923//
44924// `Matrix` is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
44925// form square matrices. `Rhs` is a tensor of shape `[..., M, K]`. The `output` is
// a tensor of shape `[..., M, K]`.  If `adjoint` is `False` then each output matrix
44927// satisfies `matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]`.
44928// If `adjoint` is `True` then each output matrix satisfies
44929// `adjoint(matrix[..., :, :]) * output[..., :, :] = rhs[..., :, :]`.
44930//
44931// Arguments:
44932//	matrix: Shape is `[..., M, M]`.
44933//	rhs: Shape is `[..., M, K]`.
44934//
44935// Returns Shape is `[..., M, K]`.
44936func MatrixSolve(scope *Scope, matrix tf.Output, rhs tf.Output, optional ...MatrixSolveAttr) (output tf.Output) {
44937	if scope.Err() != nil {
44938		return
44939	}
44940	attrs := map[string]interface{}{}
44941	for _, a := range optional {
44942		a(attrs)
44943	}
44944	opspec := tf.OpSpec{
44945		Type: "MatrixSolve",
44946		Input: []tf.Input{
44947			matrix, rhs,
44948		},
44949		Attrs: attrs,
44950	}
44951	op := scope.AddOperation(opspec)
44952	return op.Output(0)
44953}
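
// Example: a hypothetical sketch solving a single 2x2 system m*x = rhs; it
// assumes NewScope and Const from this package:
//
//	s := NewScope()
//	m := Const(s, [][]float32{{3, 1}, {1, 2}})
//	rhs := Const(s, [][]float32{{9}, {8}})
//	x := MatrixSolve(s, m, rhs) // evaluates to [[2], [3]]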
44954
44955// WriteImageSummaryAttr is an optional argument to WriteImageSummary.
44956type WriteImageSummaryAttr func(optionalAttr)
44957
44958// WriteImageSummaryMaxImages sets the optional max_images attribute to value.
44959// If not specified, defaults to 3
44960//
44961// REQUIRES: value >= 1
44962func WriteImageSummaryMaxImages(value int64) WriteImageSummaryAttr {
44963	return func(m optionalAttr) {
44964		m["max_images"] = value
44965	}
44966}
44967
44968// Writes an image summary.
44969//
44970// Writes image `tensor` at `step` with `tag` using summary `writer`.
44971// `tensor` is image with shape [height, width, channels].
44972//
44973// Returns the created operation.
44974func WriteImageSummary(scope *Scope, writer tf.Output, step tf.Output, tag tf.Output, tensor tf.Output, bad_color tf.Output, optional ...WriteImageSummaryAttr) (o *tf.Operation) {
44975	if scope.Err() != nil {
44976		return
44977	}
44978	attrs := map[string]interface{}{}
44979	for _, a := range optional {
44980		a(attrs)
44981	}
44982	opspec := tf.OpSpec{
44983		Type: "WriteImageSummary",
44984		Input: []tf.Input{
44985			writer, step, tag, tensor, bad_color,
44986		},
44987		Attrs: attrs,
44988	}
44989	return scope.AddOperation(opspec)
44990}
44991
44992// Computes the Cholesky decomposition of one or more square matrices.
44993//
44994// The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
44995// form square matrices.
44996//
44997// The input has to be symmetric and positive definite. Only the lower-triangular
44998// part of the input will be used for this operation. The upper-triangular part
44999// will not be read.
45000//
45001// The output is a tensor of the same shape as the input
45002// containing the Cholesky decompositions for all input submatrices `[..., :, :]`.
45003//
45004// **Note**: The gradient computation on GPU is faster for large matrices but
45005// not for large batch dimensions when the submatrices are small. In this
45006// case it might be faster to use the CPU.
45007//
45008// Arguments:
45009//	input: Shape is `[..., M, M]`.
45010//
45011// Returns Shape is `[..., M, M]`.
45012func Cholesky(scope *Scope, input tf.Output) (output tf.Output) {
45013	if scope.Err() != nil {
45014		return
45015	}
45016	opspec := tf.OpSpec{
45017		Type: "Cholesky",
45018		Input: []tf.Input{
45019			input,
45020		},
45021	}
45022	op := scope.AddOperation(opspec)
45023	return op.Output(0)
45024}
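
// Example: a hypothetical sketch factoring one symmetric positive-definite
// 2x2 matrix; it assumes NewScope and Const from this package:
//
//	s := NewScope()
//	a := Const(s, [][]float32{{4, 2}, {2, 3}})
//	l := Cholesky(s, a) // lower-triangular L with L * transpose(L) = a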
45025
45026// Computes the sum along sparse segments of a tensor divided by the sqrt of N.
45027//
45028// N is the size of the segment being reduced.
45029//
45030// Like `SparseSegmentSqrtN`, but allows missing ids in `segment_ids`. If an id is
45031// missing, the `output` tensor at that position will be zeroed.
45032//
45033// Read
45034// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)
45035// for an explanation of segments.
45036//
45037// Arguments:
//	data: The data to be reduced.
45039//	indices: A 1-D tensor. Has same rank as `segment_ids`.
45040//	segment_ids: A 1-D tensor. Values should be sorted and can be repeated.
45041//	num_segments: Should equal the number of distinct segment IDs.
45042//
// Returns Has same shape as data, except for dimension 0 which
// has size `num_segments`.
45045func SparseSegmentSqrtNWithNumSegments(scope *Scope, data tf.Output, indices tf.Output, segment_ids tf.Output, num_segments tf.Output) (output tf.Output) {
45046	if scope.Err() != nil {
45047		return
45048	}
45049	opspec := tf.OpSpec{
45050		Type: "SparseSegmentSqrtNWithNumSegments",
45051		Input: []tf.Input{
45052			data, indices, segment_ids, num_segments,
45053		},
45054	}
45055	op := scope.AddOperation(opspec)
45056	return op.Output(0)
45057}
45058
45059// Computes softmax cross entropy cost and gradients to backpropagate.
45060//
45061// Unlike `SoftmaxCrossEntropyWithLogits`, this operation does not accept
45062// a matrix of label probabilities, but rather a single label per row
45063// of features.  This label is considered to have probability 1.0 for the
45064// given row.
45065//
45066// Inputs are the logits, not probabilities.
45067//
45068// Arguments:
45069//	features: batch_size x num_classes matrix
45070//	labels: batch_size vector with values in [0, num_classes).
45071// This is the label for the given minibatch entry.
45072//
45073// Returns:
45074//	loss: Per example loss (batch_size vector).
45075//	backprop: backpropagated gradients (batch_size x num_classes matrix).
45076func SparseSoftmaxCrossEntropyWithLogits(scope *Scope, features tf.Output, labels tf.Output) (loss tf.Output, backprop tf.Output) {
45077	if scope.Err() != nil {
45078		return
45079	}
45080	opspec := tf.OpSpec{
45081		Type: "SparseSoftmaxCrossEntropyWithLogits",
45082		Input: []tf.Input{
45083			features, labels,
45084		},
45085	}
45086	op := scope.AddOperation(opspec)
45087	return op.Output(0), op.Output(1)
45088}
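
// Example: a hypothetical sketch with a batch of two examples and three
// classes; it assumes NewScope and Const from this package:
//
//	s := NewScope()
//	logits := Const(s, [][]float32{{2, 1, 0}, {0, 1, 2}}) // batch_size x num_classes
//	labels := Const(s, []int64{0, 2})                     // one class id per row
//	loss, backprop := SparseSoftmaxCrossEntropyWithLogits(s, logits, labels)
//	_, _ = loss, backprop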
45089
45090// Worker heartbeat op.
45091//
45092// Heartbeats may be sent periodically to indicate the coordinator is still active,
45093// to retrieve the current worker status and to expedite shutdown when necessary.
45094//
45095// Arguments:
45096//	request: A string tensor containing a serialized WorkerHeartbeatRequest
45097//
45098// Returns A string tensor containing a serialized WorkerHeartbeatResponse
45099func WorkerHeartbeat(scope *Scope, request tf.Output) (response tf.Output) {
45100	if scope.Err() != nil {
45101		return
45102	}
45103	opspec := tf.OpSpec{
45104		Type: "WorkerHeartbeat",
45105		Input: []tf.Input{
45106			request,
45107		},
45108	}
45109	op := scope.AddOperation(opspec)
45110	return op.Output(0)
45111}
45112
45113// ResourceApplyProximalGradientDescentAttr is an optional argument to ResourceApplyProximalGradientDescent.
45114type ResourceApplyProximalGradientDescentAttr func(optionalAttr)
45115
45116// ResourceApplyProximalGradientDescentUseLocking sets the optional use_locking attribute to value.
45117//
45118// value: If True, the subtraction will be protected by a lock;
45119// otherwise the behavior is undefined, but may exhibit less contention.
45120// If not specified, defaults to false
45121func ResourceApplyProximalGradientDescentUseLocking(value bool) ResourceApplyProximalGradientDescentAttr {
45122	return func(m optionalAttr) {
45123		m["use_locking"] = value
45124	}
45125}
45126
// Update '*var' according to the FOBOS algorithm with a fixed learning rate.
45128//
45129// prox_v = var - alpha * delta
45130// var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0}
45131//
45132// Arguments:
45133//	var_: Should be from a Variable().
45134//	alpha: Scaling factor. Must be a scalar.
45135//	l1: L1 regularization. Must be a scalar.
45136//	l2: L2 regularization. Must be a scalar.
45137//	delta: The change.
45138//
45139// Returns the created operation.
45140func ResourceApplyProximalGradientDescent(scope *Scope, var_ tf.Output, alpha tf.Output, l1 tf.Output, l2 tf.Output, delta tf.Output, optional ...ResourceApplyProximalGradientDescentAttr) (o *tf.Operation) {
45141	if scope.Err() != nil {
45142		return
45143	}
45144	attrs := map[string]interface{}{}
45145	for _, a := range optional {
45146		a(attrs)
45147	}
45148	opspec := tf.OpSpec{
45149		Type: "ResourceApplyProximalGradientDescent",
45150		Input: []tf.Input{
45151			var_, alpha, l1, l2, delta,
45152		},
45153		Attrs: attrs,
45154	}
45155	return scope.AddOperation(opspec)
45156}
45157
45158// Constructs a tensor by tiling a given tensor.
45159//
45160// This operation creates a new tensor by replicating `input` `multiples` times.
45161// The output tensor's i'th dimension has `input.dims(i) * multiples[i]` elements,
// and the values of `input` are replicated `multiples[i]` times along the i'th
45163// dimension. For example, tiling `[a b c d]` by `[2]` produces
45164// `[a b c d a b c d]`.
45165//
45166// >>> a = tf.constant([[1,2,3],[4,5,6]], tf.int32)
45167// >>> b = tf.constant([1,2], tf.int32)
45168// >>> tf.tile(a, b)
45169// <tf.Tensor: shape=(2, 6), dtype=int32, numpy=
45170// array([[1, 2, 3, 1, 2, 3],
45171//        [4, 5, 6, 4, 5, 6]], dtype=int32)>
45172// >>> c = tf.constant([2,1], tf.int32)
45173// >>> tf.tile(a, c)
45174// <tf.Tensor: shape=(4, 3), dtype=int32, numpy=
45175// array([[1, 2, 3],
45176//        [4, 5, 6],
45177//        [1, 2, 3],
45178//        [4, 5, 6]], dtype=int32)>
45179// >>> d = tf.constant([2,2], tf.int32)
45180// >>> tf.tile(a, d)
45181// <tf.Tensor: shape=(4, 6), dtype=int32, numpy=
45182// array([[1, 2, 3, 1, 2, 3],
45183//        [4, 5, 6, 4, 5, 6],
45184//        [1, 2, 3, 1, 2, 3],
45185//        [4, 5, 6, 4, 5, 6]], dtype=int32)>
45186//
45187// Arguments:
45188//	input: 1-D or higher.
45189//	multiples: 1-D. Length must be the same as the number of dimensions in `input`.
45190func Tile(scope *Scope, input tf.Output, multiples tf.Output) (output tf.Output) {
45191	if scope.Err() != nil {
45192		return
45193	}
45194	opspec := tf.OpSpec{
45195		Type: "Tile",
45196		Input: []tf.Input{
45197			input, multiples,
45198		},
45199	}
45200	op := scope.AddOperation(opspec)
45201	return op.Output(0)
45202}
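
// Editorial usage sketch mirroring the doctest above from Go; it assumes the
// usual tensorflow/go workflow (op.NewScope, op.Const, Scope.Finalize,
// tf.NewSession) and abbreviates error handling:
//
//	s := op.NewScope()
//	a := op.Const(s, [][]int32{{1, 2, 3}, {4, 5, 6}})
//	b := op.Const(s, []int32{1, 2})
//	tiled := op.Tile(s, a, b) // shape (2, 6), as in the doctest
//	graph, _ := s.Finalize()
//	sess, _ := tf.NewSession(graph, nil)
//	out, _ := sess.Run(nil, []tf.Output{tiled}, nil)
//	_ = out // out[0] holds [[1 2 3 1 2 3] [4 5 6 4 5 6]]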
45203
45204// Computes the matrix logarithm of one or more square matrices:
45205//
45206//
45207// \\(log(exp(A)) = A\\)
45208//
45209// This op is only defined for complex matrices. If A is positive-definite and
45210// real, then casting to a complex matrix, taking the logarithm and casting back
45211// to a real matrix will give the correct result.
45212//
45213// This function computes the matrix logarithm using the Schur-Parlett algorithm.
45214// Details of the algorithm can be found in Section 11.6.2 of:
45215// Nicholas J. Higham, Functions of Matrices: Theory and Computation, SIAM 2008.
45216// ISBN 978-0-898716-46-7.
45217//
45218// The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
45219// form square matrices. The output is a tensor of the same shape as the input
45220// containing the logarithm for all input submatrices `[..., :, :]`.
45221//
45222// Arguments:
45223//	input: Shape is `[..., M, M]`.
45224//
45225// Returns Shape is `[..., M, M]`.
45226//
45227// @compatibility(scipy)
45228// Equivalent to scipy.linalg.logm
45229// @end_compatibility
45230func MatrixLogarithm(scope *Scope, input tf.Output) (output tf.Output) {
45231	if scope.Err() != nil {
45232		return
45233	}
45234	opspec := tf.OpSpec{
45235		Type: "MatrixLogarithm",
45236		Input: []tf.Input{
45237			input,
45238		},
45239	}
45240	op := scope.AddOperation(opspec)
45241	return op.Output(0)
45242}
45243
45244// ResourceApplyGradientDescentAttr is an optional argument to ResourceApplyGradientDescent.
45245type ResourceApplyGradientDescentAttr func(optionalAttr)
45246
45247// ResourceApplyGradientDescentUseLocking sets the optional use_locking attribute to value.
45248//
45249// value: If `True`, the subtraction will be protected by a lock;
45250// otherwise the behavior is undefined, but may exhibit less contention.
45251// If not specified, defaults to false
45252func ResourceApplyGradientDescentUseLocking(value bool) ResourceApplyGradientDescentAttr {
45253	return func(m optionalAttr) {
45254		m["use_locking"] = value
45255	}
45256}
45257
45258// Update '*var' by subtracting 'alpha' * 'delta' from it.
45259//
45260// Arguments:
45261//	var_: Should be from a Variable().
45262//	alpha: Scaling factor. Must be a scalar.
45263//	delta: The change.
45264//
45265// Returns the created operation.
45266func ResourceApplyGradientDescent(scope *Scope, var_ tf.Output, alpha tf.Output, delta tf.Output, optional ...ResourceApplyGradientDescentAttr) (o *tf.Operation) {
45267	if scope.Err() != nil {
45268		return
45269	}
45270	attrs := map[string]interface{}{}
45271	for _, a := range optional {
45272		a(attrs)
45273	}
45274	opspec := tf.OpSpec{
45275		Type: "ResourceApplyGradientDescent",
45276		Input: []tf.Input{
45277			var_, alpha, delta,
45278		},
45279		Attrs: attrs,
45280	}
45281	return scope.AddOperation(opspec)
45282}
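
// Editorial sketch of a single SGD step on a resource variable, assuming the
// VarHandleOp/AssignVariableOp wrappers from this same package; the assign
// must run before the update:
//
//	s := op.NewScope()
//	v := op.VarHandleOp(s, tf.Float, tf.MakeShape()) // scalar variable
//	init := op.AssignVariableOp(s, v, op.Const(s, float32(1)))
//	step := op.ResourceApplyGradientDescent(s, v,
//		op.Const(s, float32(0.1)), // alpha
//		op.Const(s, float32(2)))   // delta
//	// Running init and then step leaves the variable at 1 - 0.1*2 = 0.8.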
45283
45284// Creates and returns an empty tensor list.
45285//
45286// All list elements must be tensors of dtype element_dtype and shape compatible
45287// with element_shape.
45288//
45289// handle: an empty tensor list.
45290// element_dtype: the type of elements in the list.
45291// element_shape: a shape compatible with that of elements in the list.
// max_num_elements: the maximum number of elements the list may hold, with -1
// meaning the list is unbounded.
45292func EmptyTensorList(scope *Scope, element_shape tf.Output, max_num_elements tf.Output, element_dtype tf.DataType) (handle tf.Output) {
45293	if scope.Err() != nil {
45294		return
45295	}
45296	attrs := map[string]interface{}{"element_dtype": element_dtype}
45297	opspec := tf.OpSpec{
45298		Type: "EmptyTensorList",
45299		Input: []tf.Input{
45300			element_shape, max_num_elements,
45301		},
45302		Attrs: attrs,
45303	}
45304	op := scope.AddOperation(opspec)
45305	return op.Output(0)
45306}
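
// Editorial usage sketch: an unbounded list of float32 vectors of length 2.
// Passing -1 as max_num_elements means "unbounded" (an assumption based on
// the TensorFlow list-ops convention):
//
//	s := op.NewScope()
//	handle := op.EmptyTensorList(s,
//		op.Const(s, []int32{2}),  // element_shape
//		op.Const(s, int32(-1)),   // max_num_elements
//		tf.Float)                 // element_dtype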
45307
45308// Sets up TPUEmbedding in a distributed TPU system.
45309//
45310// Arguments:
45311//	config: Serialized tensorflow.tpu.TPUEmbeddingConfiguration that
45312// describes the embedding lookups of the program.
45313//
45314// Returns the created operation.
45315func ConfigureTPUEmbedding(scope *Scope, config string) (o *tf.Operation) {
45316	if scope.Err() != nil {
45317		return
45318	}
45319	attrs := map[string]interface{}{"config": config}
45320	opspec := tf.OpSpec{
45321		Type: "ConfigureTPUEmbedding",
45322
45323		Attrs: attrs,
45324	}
45325	return scope.AddOperation(opspec)
45326}
45327
45328// Shuts down a running distributed TPU system.
45329//
45330// The op returns an error if no system is running.
45331//
45332// Returns the created operation.
45333func ShutdownDistributedTPU(scope *Scope) (o *tf.Operation) {
45334	if scope.Err() != nil {
45335		return
45336	}
45337	opspec := tf.OpSpec{
45338		Type: "ShutdownDistributedTPU",
45339	}
45340	return scope.AddOperation(opspec)
45341}
45342
45343// ResourceApplyMomentumAttr is an optional argument to ResourceApplyMomentum.
45344type ResourceApplyMomentumAttr func(optionalAttr)
45345
45346// ResourceApplyMomentumUseLocking sets the optional use_locking attribute to value.
45347//
45348// value: If `True`, updating of the var and accum tensors will be protected
45349// by a lock; otherwise the behavior is undefined, but may exhibit less
45350// contention.
45351// If not specified, defaults to false
45352func ResourceApplyMomentumUseLocking(value bool) ResourceApplyMomentumAttr {
45353	return func(m optionalAttr) {
45354		m["use_locking"] = value
45355	}
45356}
45357
45358// ResourceApplyMomentumUseNesterov sets the optional use_nesterov attribute to value.
45359//
45360// value: If `True`, the tensor passed to compute grad will be
45361// var - lr * momentum * accum (the Nesterov look-ahead), so the var you get
45362// back is the value evaluated at that look-ahead point.
45363// If not specified, defaults to false
45364func ResourceApplyMomentumUseNesterov(value bool) ResourceApplyMomentumAttr {
45365	return func(m optionalAttr) {
45366		m["use_nesterov"] = value
45367	}
45368}
45369
45370// Update '*var' according to the momentum scheme.
45371//
45372// Set use_nesterov = True if you want to use Nesterov momentum.
45373//
45374// accum = accum * momentum + grad
45375// var -= lr * accum
45376//
45377// Arguments:
45378//	var_: Should be from a Variable().
45379//	accum: Should be from a Variable().
45380//	lr: Scaling factor. Must be a scalar.
45381//	grad: The gradient.
45382//	momentum: Momentum. Must be a scalar.
45383//
45384// Returns the created operation.
45385func ResourceApplyMomentum(scope *Scope, var_ tf.Output, accum tf.Output, lr tf.Output, grad tf.Output, momentum tf.Output, optional ...ResourceApplyMomentumAttr) (o *tf.Operation) {
45386	if scope.Err() != nil {
45387		return
45388	}
45389	attrs := map[string]interface{}{}
45390	for _, a := range optional {
45391		a(attrs)
45392	}
45393	opspec := tf.OpSpec{
45394		Type: "ResourceApplyMomentum",
45395		Input: []tf.Input{
45396			var_, accum, lr, grad, momentum,
45397		},
45398		Attrs: attrs,
45399	}
45400	return scope.AddOperation(opspec)
45401}
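
// A worked instance of the update above (editorial note): with accum = 0,
// momentum = 0.9, lr = 0.1 and grad = 1.0 on two consecutive steps:
//
//	step 1: accum = 0*0.9 + 1.0   = 1.0; var -= 0.1*1.0 (decrease of 0.10)
//	step 2: accum = 1.0*0.9 + 1.0 = 1.9; var -= 0.1*1.9 (decrease of 0.19)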
45402
45403// ConfigureDistributedTPUAttr is an optional argument to ConfigureDistributedTPU.
45404type ConfigureDistributedTPUAttr func(optionalAttr)
45405
45406// ConfigureDistributedTPUEmbeddingConfig sets the optional embedding_config attribute to value.
45407//
45408// value: Reserved. Do not use.
45409// If not specified, defaults to ""
45410func ConfigureDistributedTPUEmbeddingConfig(value string) ConfigureDistributedTPUAttr {
45411	return func(m optionalAttr) {
45412		m["embedding_config"] = value
45413	}
45414}
45415
45416// ConfigureDistributedTPUTpuEmbeddingConfig sets the optional tpu_embedding_config attribute to value.
45417//
45418// value: Serialized tensorflow.tpu.TPUEmbeddingConfiguration that
45419// describes the embedding lookups of the program.
45420// If not specified, defaults to ""
45421func ConfigureDistributedTPUTpuEmbeddingConfig(value string) ConfigureDistributedTPUAttr {
45422	return func(m optionalAttr) {
45423		m["tpu_embedding_config"] = value
45424	}
45425}
45426
45427// ConfigureDistributedTPUIsGlobalInit sets the optional is_global_init attribute to value.
45428//
45429// value: Reserved. Do not use.
45430// If not specified, defaults to false
45431func ConfigureDistributedTPUIsGlobalInit(value bool) ConfigureDistributedTPUAttr {
45432	return func(m optionalAttr) {
45433		m["is_global_init"] = value
45434	}
45435}
45436
45437// ConfigureDistributedTPUEnableWholeMeshCompilations sets the optional enable_whole_mesh_compilations attribute to value.
45438// If not specified, defaults to false
45439func ConfigureDistributedTPUEnableWholeMeshCompilations(value bool) ConfigureDistributedTPUAttr {
45440	return func(m optionalAttr) {
45441		m["enable_whole_mesh_compilations"] = value
45442	}
45443}
45444
45445// ConfigureDistributedTPUCompilationFailureClosesChips sets the optional compilation_failure_closes_chips attribute to value.
45446// If not specified, defaults to true
45447func ConfigureDistributedTPUCompilationFailureClosesChips(value bool) ConfigureDistributedTPUAttr {
45448	return func(m optionalAttr) {
45449		m["compilation_failure_closes_chips"] = value
45450	}
45451}
45452
45453// Sets up the centralized structures for a distributed TPU system.
45454//
45455// Returns A serialized tensorflow.tpu.TopologyProto that describes the TPU
45456// topology.
45457func ConfigureDistributedTPU(scope *Scope, optional ...ConfigureDistributedTPUAttr) (topology tf.Output) {
45458	if scope.Err() != nil {
45459		return
45460	}
45461	attrs := map[string]interface{}{}
45462	for _, a := range optional {
45463		a(attrs)
45464	}
45465	opspec := tf.OpSpec{
45466		Type: "ConfigureDistributedTPU",
45467
45468		Attrs: attrs,
45469	}
45470	op := scope.AddOperation(opspec)
45471	return op.Output(0)
45472}
45473
45474// Converts each string in the input Tensor to its hash mod by a number of buckets.
45475//
45476// The hash function is deterministic on the content of the string within the
45477// process. The hash function is a keyed hash function, where attribute `key`
45478// defines the key of the hash function. `key` is an array of 2 elements.
45479//
45480// A strong hash is important when inputs may be malicious, e.g. URLs with
45481// additional components. Adversaries could try to make their inputs hash to the
45482// same bucket for a denial-of-service attack or to skew the results. A strong
45483// hash can be used to make it difficult to find inputs with a skewed hash value
45484// distribution over buckets. This requires that the hash function is
45485// seeded by a high-entropy (random) "key" unknown to the adversary.
45486//
45487// The additional robustness comes at a cost of roughly 4x higher compute
45488// time than `tf.string_to_hash_bucket_fast`.
45489//
45490// Examples:
45491//
45492// >>> tf.strings.to_hash_bucket_strong(["Hello", "TF"], 3, [1, 2]).numpy()
45493// array([2, 0])
45494//
45495// Arguments:
45496//	input: The strings to assign a hash bucket.
45497//	num_buckets: The number of buckets.
45498//	key: The key used to seed the hash function, passed as a list of two uint64
45499// elements.
45500//
45501// Returns A Tensor of the same shape as the input tensor.
45502func StringToHashBucketStrong(scope *Scope, input tf.Output, num_buckets int64, key []int64) (output tf.Output) {
45503	if scope.Err() != nil {
45504		return
45505	}
45506	attrs := map[string]interface{}{"num_buckets": num_buckets, "key": key}
45507	opspec := tf.OpSpec{
45508		Type: "StringToHashBucketStrong",
45509		Input: []tf.Input{
45510			input,
45511		},
45512		Attrs: attrs,
45513	}
45514	op := scope.AddOperation(opspec)
45515	return op.Output(0)
45516}
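
// Editorial usage sketch mirroring the doctest above from Go:
//
//	s := op.NewScope()
//	in := op.Const(s, []string{"Hello", "TF"})
//	buckets := op.StringToHashBucketStrong(s, in, 3, []int64{1, 2})
//	// Evaluating buckets yields [2, 0], matching the doctest.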
45517
45518// Computes rectified linear gradients for a Relu operation.
45519//
45520// Arguments:
45521//	gradients: The backpropagated gradients to the corresponding Relu operation.
45522//	features: The features passed as input to the corresponding Relu operation, OR
45523// the outputs of that operation (both work equivalently).
45524//
45525// Returns `gradients * (features > 0)`.
45526func ReluGrad(scope *Scope, gradients tf.Output, features tf.Output) (backprops tf.Output) {
45527	if scope.Err() != nil {
45528		return
45529	}
45530	opspec := tf.OpSpec{
45531		Type: "ReluGrad",
45532		Input: []tf.Input{
45533			gradients, features,
45534		},
45535	}
45536	op := scope.AddOperation(opspec)
45537	return op.Output(0)
45538}
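
// For example (editorial note): gradients = [5, 5, 5] and features = [-2, 0, 3]
// give backprops = [0, 0, 5], since only the element with features > 0 passes
// its gradient through.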
45539
45540// An Op to sum inputs across replicated TPU instances.
45541//
45542// Each instance supplies its own input.
45543//
45544// For example, suppose there are 8 TPU instances: `[A, B, C, D, E, F, G, H]`.
45545// Passing group_assignment=`[[0,2,4,6],[1,3,5,7]]` sets `A, C, E, G` as group 0,
45546// and `B, D, F, H` as group 1. Thus we get the outputs:
45547// `[A+C+E+G, B+D+F+H, A+C+E+G, B+D+F+H, A+C+E+G, B+D+F+H, A+C+E+G, B+D+F+H]`.
45548//
45549// Arguments:
45550//	input: The local input to the sum.
45551//	group_assignment: An int32 tensor with shape
45552// [num_groups, num_replicas_per_group]. `group_assignment[i]` represents the
45553// replica ids in the ith subgroup.
45554//
45555// Returns The sum of all the distributed inputs.
45556func CrossReplicaSum(scope *Scope, input tf.Output, group_assignment tf.Output) (output tf.Output) {
45557	if scope.Err() != nil {
45558		return
45559	}
45560	opspec := tf.OpSpec{
45561		Type: "CrossReplicaSum",
45562		Input: []tf.Input{
45563			input, group_assignment,
45564		},
45565	}
45566	op := scope.AddOperation(opspec)
45567	return op.Output(0)
45568}
45569
45570// LoadTPUEmbeddingFrequencyEstimatorParametersAttr is an optional argument to LoadTPUEmbeddingFrequencyEstimatorParameters.
45571type LoadTPUEmbeddingFrequencyEstimatorParametersAttr func(optionalAttr)
45572
45573// LoadTPUEmbeddingFrequencyEstimatorParametersTableId sets the optional table_id attribute to value.
45574// If not specified, defaults to -1
45575func LoadTPUEmbeddingFrequencyEstimatorParametersTableId(value int64) LoadTPUEmbeddingFrequencyEstimatorParametersAttr {
45576	return func(m optionalAttr) {
45577		m["table_id"] = value
45578	}
45579}
45580
45581// LoadTPUEmbeddingFrequencyEstimatorParametersTableName sets the optional table_name attribute to value.
45582// If not specified, defaults to ""
45583func LoadTPUEmbeddingFrequencyEstimatorParametersTableName(value string) LoadTPUEmbeddingFrequencyEstimatorParametersAttr {
45584	return func(m optionalAttr) {
45585		m["table_name"] = value
45586	}
45587}
45588
45589// LoadTPUEmbeddingFrequencyEstimatorParametersConfig sets the optional config attribute to value.
45590// If not specified, defaults to ""
45591func LoadTPUEmbeddingFrequencyEstimatorParametersConfig(value string) LoadTPUEmbeddingFrequencyEstimatorParametersAttr {
45592	return func(m optionalAttr) {
45593		m["config"] = value
45594	}
45595}
45596
45597// Load frequency estimator embedding parameters.
45598//
45599// An op that loads optimization parameters into HBM for embedding. Must be
45600// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
45601// embedding table configuration. For example, this op is used to install
45602// parameters that are loaded from a checkpoint before a training loop is
45603// executed.
45604//
45605// Arguments:
45606//	parameters: Value of parameters used in the frequency estimator optimization algorithm.
45607//	last_hit_step: Value of last_hit_step used in the frequency estimator optimization algorithm.
45608//
45611// Returns the created operation.
45612func LoadTPUEmbeddingFrequencyEstimatorParameters(scope *Scope, parameters tf.Output, last_hit_step tf.Output, num_shards int64, shard_id int64, optional ...LoadTPUEmbeddingFrequencyEstimatorParametersAttr) (o *tf.Operation) {
45613	if scope.Err() != nil {
45614		return
45615	}
45616	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
45617	for _, a := range optional {
45618		a(attrs)
45619	}
45620	opspec := tf.OpSpec{
45621		Type: "LoadTPUEmbeddingFrequencyEstimatorParameters",
45622		Input: []tf.Input{
45623			parameters, last_hit_step,
45624		},
45625		Attrs: attrs,
45626	}
45627	return scope.AddOperation(opspec)
45628}
45629
45630// LoadTPUEmbeddingADAMParametersAttr is an optional argument to LoadTPUEmbeddingADAMParameters.
45631type LoadTPUEmbeddingADAMParametersAttr func(optionalAttr)
45632
45633// LoadTPUEmbeddingADAMParametersTableId sets the optional table_id attribute to value.
45634// If not specified, defaults to -1
45635func LoadTPUEmbeddingADAMParametersTableId(value int64) LoadTPUEmbeddingADAMParametersAttr {
45636	return func(m optionalAttr) {
45637		m["table_id"] = value
45638	}
45639}
45640
45641// LoadTPUEmbeddingADAMParametersTableName sets the optional table_name attribute to value.
45642// If not specified, defaults to ""
45643func LoadTPUEmbeddingADAMParametersTableName(value string) LoadTPUEmbeddingADAMParametersAttr {
45644	return func(m optionalAttr) {
45645		m["table_name"] = value
45646	}
45647}
45648
45649// LoadTPUEmbeddingADAMParametersConfig sets the optional config attribute to value.
45650// If not specified, defaults to ""
45651func LoadTPUEmbeddingADAMParametersConfig(value string) LoadTPUEmbeddingADAMParametersAttr {
45652	return func(m optionalAttr) {
45653		m["config"] = value
45654	}
45655}
45656
45657// Load ADAM embedding parameters.
45658//
45659// An op that loads optimization parameters into HBM for embedding. Must be
45660// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
45661// embedding table configuration. For example, this op is used to install
45662// parameters that are loaded from a checkpoint before a training loop is
45663// executed.
45664//
45665// Arguments:
45666//	parameters: Value of parameters used in the ADAM optimization algorithm.
45667//	momenta: Value of momenta used in the ADAM optimization algorithm.
45668//	velocities: Value of velocities used in the ADAM optimization algorithm.
45669//
45672// Returns the created operation.
45673func LoadTPUEmbeddingADAMParameters(scope *Scope, parameters tf.Output, momenta tf.Output, velocities tf.Output, num_shards int64, shard_id int64, optional ...LoadTPUEmbeddingADAMParametersAttr) (o *tf.Operation) {
45674	if scope.Err() != nil {
45675		return
45676	}
45677	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
45678	for _, a := range optional {
45679		a(attrs)
45680	}
45681	opspec := tf.OpSpec{
45682		Type: "LoadTPUEmbeddingADAMParameters",
45683		Input: []tf.Input{
45684			parameters, momenta, velocities,
45685		},
45686		Attrs: attrs,
45687	}
45688	return scope.AddOperation(opspec)
45689}
45690
45691// Transforms a vector of brain.Example protos (as strings) into typed tensors.
45692//
45693// Arguments:
45694//	serialized: A vector containing a batch of binary serialized Example protos.
45695//	names: A vector containing the names of the serialized protos.
45696// May contain, for example, table key (descriptive) names for the
45697// corresponding serialized protos.  These are purely useful for debugging
45698// purposes, and the presence of values here has no effect on the output.
45699// May also be an empty vector if no names are available.
45700// If non-empty, this vector must be the same length as "serialized".
45701//	sparse_keys: A list of Nsparse string Tensors (scalars).
45702// The keys expected in the Examples' features associated with sparse values.
45703//	dense_keys: A list of Ndense string Tensors (scalars).
45704// The keys expected in the Examples' features associated with dense values.
45705//	dense_defaults: A list of Ndense Tensors (some may be empty).
45706// dense_defaults[j] provides default values
45707// when the example's feature_map lacks dense_key[j].  If an empty Tensor is
45708// provided for dense_defaults[j], then the Feature dense_keys[j] is required.
45709// The input type is inferred from dense_defaults[j], even when it's empty.
45710// If dense_defaults[j] is not empty, and dense_shapes[j] is fully defined,
45711// then the shape of dense_defaults[j] must match that of dense_shapes[j].
45712// If dense_shapes[j] has an undefined major dimension (variable strides dense
45713// feature), dense_defaults[j] must contain a single element:
45714// the padding element.
45715//	sparse_types: A list of Nsparse types; the data types of data in each Feature
45716// given in sparse_keys.
45717// Currently the ParseExample supports DT_FLOAT (FloatList),
45718// DT_INT64 (Int64List), and DT_STRING (BytesList).
45719//	dense_shapes: A list of Ndense shapes; the shapes of data in each Feature
45720// given in dense_keys.
45721// The number of elements in the Feature corresponding to dense_key[j]
45722// must always equal dense_shapes[j].NumEntries().
45723// If dense_shapes[j] == (D0, D1, ..., DN) then the shape of output
45724// Tensor dense_values[j] will be (|serialized|, D0, D1, ..., DN):
45725// The dense outputs are just the inputs row-stacked by batch.
45726// This works for dense_shapes[j] = (-1, D1, ..., DN).  In this case
45727// the shape of the output Tensor dense_values[j] will be
45728// (|serialized|, M, D1, .., DN), where M is the maximum number of blocks
45729// of elements of length D1 * .... * DN, across all minibatch entries
45730// in the input.  Any minibatch entry with less than M blocks of elements of
45731// length D1 * ... * DN will be padded with the corresponding default_value
45732// scalar element along the second dimension.
45733func ParseExample(scope *Scope, serialized tf.Output, names tf.Output, sparse_keys []tf.Output, dense_keys []tf.Output, dense_defaults []tf.Output, sparse_types []tf.DataType, dense_shapes []tf.Shape) (sparse_indices []tf.Output, sparse_values []tf.Output, sparse_shapes []tf.Output, dense_values []tf.Output) {
45734	if scope.Err() != nil {
45735		return
45736	}
45737	attrs := map[string]interface{}{"sparse_types": sparse_types, "dense_shapes": dense_shapes}
45738	opspec := tf.OpSpec{
45739		Type: "ParseExample",
45740		Input: []tf.Input{
45741			serialized, names, tf.OutputList(sparse_keys), tf.OutputList(dense_keys), tf.OutputList(dense_defaults),
45742		},
45743		Attrs: attrs,
45744	}
45745	op := scope.AddOperation(opspec)
45746	if scope.Err() != nil {
45747		return
45748	}
45749	var idx int
45750	var err error
45751	if sparse_indices, idx, err = makeOutputList(op, idx, "sparse_indices"); err != nil {
45752		scope.UpdateErr("ParseExample", err)
45753		return
45754	}
45755	if sparse_values, idx, err = makeOutputList(op, idx, "sparse_values"); err != nil {
45756		scope.UpdateErr("ParseExample", err)
45757		return
45758	}
45759	if sparse_shapes, idx, err = makeOutputList(op, idx, "sparse_shapes"); err != nil {
45760		scope.UpdateErr("ParseExample", err)
45761		return
45762	}
45763	if dense_values, idx, err = makeOutputList(op, idx, "dense_values"); err != nil {
45764		scope.UpdateErr("ParseExample", err)
45765		return
45766	}
45767	return sparse_indices, sparse_values, sparse_shapes, dense_values
45768}
45769
45770// Records the latency of producing `input_dataset` elements in a StatsAggregator.
45771func LatencyStatsDataset(scope *Scope, input_dataset tf.Output, tag tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
45772	if scope.Err() != nil {
45773		return
45774	}
45775	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
45776	opspec := tf.OpSpec{
45777		Type: "LatencyStatsDataset",
45778		Input: []tf.Input{
45779			input_dataset, tag,
45780		},
45781		Attrs: attrs,
45782	}
45783	op := scope.AddOperation(opspec)
45784	return op.Output(0)
45785}
45786
45787// Computes the power of one value to another.
45788//
45789// Given a tensor `x` and a tensor `y`, this operation computes \\(x^y\\) for
45790// corresponding elements in `x` and `y`. For example:
45791//
45792// ```
45793// # tensor 'x' is [[2, 2], [3, 3]]
45794// # tensor 'y' is [[8, 16], [2, 3]]
45795// tf.pow(x, y) ==> [[256, 65536], [9, 27]]
45796// ```
45797func Pow(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
45798	if scope.Err() != nil {
45799		return
45800	}
45801	opspec := tf.OpSpec{
45802		Type: "Pow",
45803		Input: []tf.Input{
45804			x, y,
45805		},
45806	}
45807	op := scope.AddOperation(opspec)
45808	return op.Output(0)
45809}
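
// Editorial usage sketch of the example above from Go:
//
//	s := op.NewScope()
//	x := op.Const(s, [][]int32{{2, 2}, {3, 3}})
//	y := op.Const(s, [][]int32{{8, 16}, {2, 3}})
//	z := op.Pow(s, x, y) // evaluates to [[256, 65536], [9, 27]]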
45810
45811// Element-wise multiplication of a sparse matrix with a dense tensor.
45812//
45813// Returns a sparse matrix.
45814//
45815// The dense tensor `b` must either be a scalar or, when `a` is a rank-3
45816// `SparseMatrix`, be shaped `[batch_size, 1, 1]`, in which case the multiply
45817// operation broadcasts over the batch dimension.
45818//
45819// **NOTE** even if `b` is zero, the sparsity structure of the output does not
45820// change.
45821//
45822// Arguments:
45823//	a: A CSRSparseMatrix.
45824//	b: A dense tensor.
45825//
45826// Returns A CSRSparseMatrix.
45827func SparseMatrixMul(scope *Scope, a tf.Output, b tf.Output) (output tf.Output) {
45828	if scope.Err() != nil {
45829		return
45830	}
45831	opspec := tf.OpSpec{
45832		Type: "SparseMatrixMul",
45833		Input: []tf.Input{
45834			a, b,
45835		},
45836	}
45837	op := scope.AddOperation(opspec)
45838	return op.Output(0)
45839}
45840
45841// Returns the element-wise sum of a list of tensors.
45842//
45843// `tf.accumulate_n_v2` performs the same operation as `tf.add_n`, but does not
45844// wait for all of its inputs to be ready before beginning to sum. This can
45845// save memory if inputs are ready at different times, since minimum temporary
45846// storage is proportional to the output size rather than the inputs size.
45847//
45848// Unlike the original `accumulate_n`, `accumulate_n_v2` is differentiable.
45849//
45850// Returns a `Tensor` of same shape and type as the elements of `inputs`.
45851//
45852// Arguments:
45853//	inputs: A list of `Tensor` objects, each with same shape and type.
45854//	shape: Shape of elements of `inputs`.
45855func AccumulateNV2(scope *Scope, inputs []tf.Output, shape tf.Shape) (sum tf.Output) {
45856	if scope.Err() != nil {
45857		return
45858	}
45859	attrs := map[string]interface{}{"shape": shape}
45860	opspec := tf.OpSpec{
45861		Type: "AccumulateNV2",
45862		Input: []tf.Input{
45863			tf.OutputList(inputs),
45864		},
45865		Attrs: attrs,
45866	}
45867	op := scope.AddOperation(opspec)
45868	return op.Output(0)
45869}
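
// Editorial usage sketch: summing two equally shaped tensors. The shape
// attribute must match the element shape:
//
//	s := op.NewScope()
//	a := op.Const(s, []float32{1, 2})
//	b := op.Const(s, []float32{3, 4})
//	sum := op.AccumulateNV2(s, []tf.Output{a, b}, tf.MakeShape(2))
//	// evaluates to [4, 6]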
45870
45871// An op enabling differentiation of TPU Embeddings.
45872//
45873// This op simply returns its first input, which is assumed to have been sliced
45874// from the Tensors returned by TPUEmbeddingDequeueActivations. The presence of
45875// this op, and its first argument being a trainable Variable, enables automatic
45876// differentiation of graphs containing embeddings via the TPU Embedding Python
45877// libraries.
45878//
45879// Arguments:
45880//	embedding_variable: A trainable variable, enabling optimizers to find this op.
45881//	sliced_activations: The embedding activations Tensor to return.
45882//	table_id: The id of the table in the embedding layer configuration from which
45883// these activations were computed.
45884//	lookup_id: Identifier of the set of embedding indices which produced these
45885// activations.
45886func TPUEmbeddingActivations(scope *Scope, embedding_variable tf.Output, sliced_activations tf.Output, table_id int64, lookup_id int64) (output tf.Output) {
45887	if scope.Err() != nil {
45888		return
45889	}
45890	attrs := map[string]interface{}{"table_id": table_id, "lookup_id": lookup_id}
45891	opspec := tf.OpSpec{
45892		Type: "TPUEmbeddingActivations",
45893		Input: []tf.Input{
45894			embedding_variable, sliced_activations,
45895		},
45896		Attrs: attrs,
45897	}
45898	op := scope.AddOperation(opspec)
45899	return op.Output(0)
45900}
45901
45902// StatelessTruncatedNormalV2Attr is an optional argument to StatelessTruncatedNormalV2.
45903type StatelessTruncatedNormalV2Attr func(optionalAttr)
45904
45905// StatelessTruncatedNormalV2Dtype sets the optional dtype attribute to value.
45906//
45907// value: The type of the output.
45908// If not specified, defaults to DT_FLOAT
45909func StatelessTruncatedNormalV2Dtype(value tf.DataType) StatelessTruncatedNormalV2Attr {
45910	return func(m optionalAttr) {
45911		m["dtype"] = value
45912	}
45913}
45914
45915// Outputs deterministic pseudorandom values from a truncated normal distribution.
45916//
45917// The generated values follow a normal distribution with mean 0 and standard
45918// deviation 1, except that values whose magnitude is more than 2 standard
45919// deviations from the mean are dropped and re-picked.
45920//
45921// The outputs are a deterministic function of `shape`, `key`, `counter` and `alg`.
45922//
45923// Arguments:
45924//	shape: The shape of the output tensor.
45925//	key: Key for the counter-based RNG algorithm (shape uint64[1]).
45926//	counter: Initial counter for the counter-based RNG algorithm (shape uint64[2] or uint64[1] depending on the algorithm). If a larger vector is given, only the needed portion on the left (i.e. [:N]) will be used.
45927//	alg: The RNG algorithm (shape int32[]).
45928//
45929// Returns Random values with specified shape.
45930func StatelessTruncatedNormalV2(scope *Scope, shape tf.Output, key tf.Output, counter tf.Output, alg tf.Output, optional ...StatelessTruncatedNormalV2Attr) (output tf.Output) {
45931	if scope.Err() != nil {
45932		return
45933	}
45934	attrs := map[string]interface{}{}
45935	for _, a := range optional {
45936		a(attrs)
45937	}
45938	opspec := tf.OpSpec{
45939		Type: "StatelessTruncatedNormalV2",
45940		Input: []tf.Input{
45941			shape, key, counter, alg,
45942		},
45943		Attrs: attrs,
45944	}
45945	op := scope.AddOperation(opspec)
45946	return op.Output(0)
45947}
45948
45949// QuantizeAndDequantizeV3Attr is an optional argument to QuantizeAndDequantizeV3.
45950type QuantizeAndDequantizeV3Attr func(optionalAttr)
45951
45952// QuantizeAndDequantizeV3SignedInput sets the optional signed_input attribute to value.
45953// If not specified, defaults to true
45954func QuantizeAndDequantizeV3SignedInput(value bool) QuantizeAndDequantizeV3Attr {
45955	return func(m optionalAttr) {
45956		m["signed_input"] = value
45957	}
45958}
45959
45960// QuantizeAndDequantizeV3RangeGiven sets the optional range_given attribute to value.
45961// If not specified, defaults to true
45962func QuantizeAndDequantizeV3RangeGiven(value bool) QuantizeAndDequantizeV3Attr {
45963	return func(m optionalAttr) {
45964		m["range_given"] = value
45965	}
45966}
45967
45968// QuantizeAndDequantizeV3NarrowRange sets the optional narrow_range attribute to value.
45969// If not specified, defaults to false
45970func QuantizeAndDequantizeV3NarrowRange(value bool) QuantizeAndDequantizeV3Attr {
45971	return func(m optionalAttr) {
45972		m["narrow_range"] = value
45973	}
45974}
45975
45976// QuantizeAndDequantizeV3Axis sets the optional axis attribute to value.
45977// If not specified, defaults to -1
45978func QuantizeAndDequantizeV3Axis(value int64) QuantizeAndDequantizeV3Attr {
45979	return func(m optionalAttr) {
45980		m["axis"] = value
45981	}
45982}
45983
45984// Quantizes then dequantizes a tensor.
45985//
45986// This is almost identical to QuantizeAndDequantizeV2, except that num_bits is a
45987// tensor, so its value can change during training.
45988func QuantizeAndDequantizeV3(scope *Scope, input tf.Output, input_min tf.Output, input_max tf.Output, num_bits tf.Output, optional ...QuantizeAndDequantizeV3Attr) (output tf.Output) {
45989	if scope.Err() != nil {
45990		return
45991	}
45992	attrs := map[string]interface{}{}
45993	for _, a := range optional {
45994		a(attrs)
45995	}
45996	opspec := tf.OpSpec{
45997		Type: "QuantizeAndDequantizeV3",
45998		Input: []tf.Input{
45999			input, input_min, input_max, num_bits,
46000		},
46001		Attrs: attrs,
46002	}
46003	op := scope.AddOperation(opspec)
46004	return op.Output(0)
46005}
46006
46007// Returns x * y element-wise.
46008//
46009// *NOTE*: `Mul` supports broadcasting. More about broadcasting
46010// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
46011func Mul(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
46012	if scope.Err() != nil {
46013		return
46014	}
46015	opspec := tf.OpSpec{
46016		Type: "Mul",
46017		Input: []tf.Input{
46018			x, y,
46019		},
46020	}
46021	op := scope.AddOperation(opspec)
46022	return op.Output(0)
46023}
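
// A broadcasting example (editorial note): a [2, 2] tensor multiplied by a
// rank-1 tensor of shape [2] scales each column:
//
//	x = [[1, 2], [3, 4]]
//	y = [10, 100]
//	Mul(x, y) = [[10, 200], [30, 400]]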
46024
46025// Computes softplus gradients for a softplus operation.
46026//
46027// Arguments:
46028//	gradients: The backpropagated gradients to the corresponding softplus operation.
46029//	features: The features passed as input to the corresponding softplus operation.
46030//
46031// Returns The gradients: `gradients / (1 + exp(-features))`.
46032func SoftplusGrad(scope *Scope, gradients tf.Output, features tf.Output) (backprops tf.Output) {
46033	if scope.Err() != nil {
46034		return
46035	}
46036	opspec := tf.OpSpec{
46037		Type: "SoftplusGrad",
46038		Input: []tf.Input{
46039			gradients, features,
46040		},
46041	}
46042	op := scope.AddOperation(opspec)
46043	return op.Output(0)
46044}
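
// For example (editorial note): at an element where features = 0, the factor
// 1 / (1 + exp(-0)) is 0.5, so that element of backprops is 0.5 * gradients.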
46045
46046// StatelessRandomNormalV2Attr is an optional argument to StatelessRandomNormalV2.
46047type StatelessRandomNormalV2Attr func(optionalAttr)
46048
46049// StatelessRandomNormalV2Dtype sets the optional dtype attribute to value.
46050//
46051// value: The type of the output.
46052// If not specified, defaults to DT_FLOAT
46053func StatelessRandomNormalV2Dtype(value tf.DataType) StatelessRandomNormalV2Attr {
46054	return func(m optionalAttr) {
46055		m["dtype"] = value
46056	}
46057}
46058
46059// Outputs deterministic pseudorandom values from a normal distribution.
46060//
46061// The generated values will have mean 0 and standard deviation 1.
46062//
46063// The outputs are a deterministic function of `shape`, `key`, `counter` and `alg`.
46064//
46065// Arguments:
46066//	shape: The shape of the output tensor.
46067//	key: Key for the counter-based RNG algorithm (shape uint64[1]).
46068//	counter: Initial counter for the counter-based RNG algorithm (shape uint64[2] or uint64[1] depending on the algorithm). If a larger vector is given, only the needed portion on the left (i.e. [:N]) will be used.
46069//	alg: The RNG algorithm (shape int32[]).
46070//
46071// Returns Random values with specified shape.
46072func StatelessRandomNormalV2(scope *Scope, shape tf.Output, key tf.Output, counter tf.Output, alg tf.Output, optional ...StatelessRandomNormalV2Attr) (output tf.Output) {
46073	if scope.Err() != nil {
46074		return
46075	}
46076	attrs := map[string]interface{}{}
46077	for _, a := range optional {
46078		a(attrs)
46079	}
46080	opspec := tf.OpSpec{
46081		Type: "StatelessRandomNormalV2",
46082		Input: []tf.Input{
46083			shape, key, counter, alg,
46084		},
46085		Attrs: attrs,
46086	}
46087	op := scope.AddOperation(opspec)
46088	return op.Output(0)
46089}
46090
46091// RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebugAttr is an optional argument to RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebug.
46092type RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebugAttr func(optionalAttr)
46093
46094// RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebugTableId sets the optional table_id attribute to value.
46095// If not specified, defaults to -1
46096func RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebugTableId(value int64) RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebugAttr {
46097	return func(m optionalAttr) {
46098		m["table_id"] = value
46099	}
46100}
46101
46102// RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebugTableName sets the optional table_name attribute to value.
46103// If not specified, defaults to ""
46104func RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebugTableName(value string) RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebugAttr {
46105	return func(m optionalAttr) {
46106		m["table_name"] = value
46107	}
46108}
46109
46110// RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebugConfig sets the optional config attribute to value.
46111// If not specified, defaults to ""
46112func RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebugConfig(value string) RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebugAttr {
46113	return func(m optionalAttr) {
46114		m["config"] = value
46115	}
46116}
46117
46118// Retrieve proximal Adagrad embedding parameters with debug support.
46119//
46120// An op that retrieves optimization parameters from embedding to host
46121// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
46122// the correct embedding table configuration. For example, this op is
46123// used to retrieve updated parameters before saving a checkpoint.
46124//
46125// Returns:
46126//	parameters: Parameter parameters updated by the proximal Adagrad optimization algorithm.
46127//	accumulators: Parameter accumulators updated by the proximal Adagrad optimization algorithm.
46128//	gradient_accumulators: Parameter gradient_accumulators updated by the proximal Adagrad optimization algorithm.
46129func RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebug(scope *Scope, num_shards int64, shard_id int64, optional ...RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebugAttr) (parameters tf.Output, accumulators tf.Output, gradient_accumulators tf.Output) {
46130	if scope.Err() != nil {
46131		return
46132	}
46133	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
46134	for _, a := range optional {
46135		a(attrs)
46136	}
46137	opspec := tf.OpSpec{
46138		Type: "RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebug",
46139
46140		Attrs: attrs,
46141	}
46142	op := scope.AddOperation(opspec)
46143	return op.Output(0), op.Output(1), op.Output(2)
46144}
46145
46146// IRFFTAttr is an optional argument to IRFFT.
46147type IRFFTAttr func(optionalAttr)
46148
46149// IRFFTTreal sets the optional Treal attribute to value.
46150// If not specified, defaults to DT_FLOAT
46151func IRFFTTreal(value tf.DataType) IRFFTAttr {
46152	return func(m optionalAttr) {
46153		m["Treal"] = value
46154	}
46155}
46156
46157// Inverse real-valued fast Fourier transform.
46158//
46159// Computes the inverse 1-dimensional discrete Fourier transform of a real-valued
46160// signal over the inner-most dimension of `input`.
46161//
46162// The inner-most dimension of `input` is assumed to be the result of `RFFT`: the
46163// `fft_length / 2 + 1` unique components of the DFT of a real-valued signal. If
46164// `fft_length` is not provided, it is computed from the size of the inner-most
46165// dimension of `input` (`fft_length = 2 * (inner - 1)`). If the FFT length used to
46166// compute `input` is odd, it should be provided since it cannot be inferred
46167// properly.
46168//
46169// Along the axis `IRFFT` is computed on, if `fft_length / 2 + 1` is smaller
46170// than the corresponding dimension of `input`, the dimension is cropped. If it is
46171// larger, the dimension is padded with zeros.
46172//
46173// Arguments:
46174//	input: A complex tensor.
46175//	fft_length: An int32 tensor of shape [1]. The FFT length.
46176//
46177// Returns A float32 tensor of the same rank as `input`. The inner-most
46178//   dimension of `input` is replaced with the `fft_length` samples of its inverse
46179//   1D Fourier transform.
46180//
46181// @compatibility(numpy)
46182// Equivalent to np.fft.irfft
46183// @end_compatibility
46184func IRFFT(scope *Scope, input tf.Output, fft_length tf.Output, optional ...IRFFTAttr) (output tf.Output) {
46185	if scope.Err() != nil {
46186		return
46187	}
46188	attrs := map[string]interface{}{}
46189	for _, a := range optional {
46190		a(attrs)
46191	}
46192	opspec := tf.OpSpec{
46193		Type: "IRFFT",
46194		Input: []tf.Input{
46195			input, fft_length,
46196		},
46197		Attrs: attrs,
46198	}
46199	op := scope.AddOperation(opspec)
46200	return op.Output(0)
46201}
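
// A worked instance of the length inference above (editorial note): an
// inner-most dimension of 129 implies fft_length = 2 * (129 - 1) = 256. A
// length-255 signal also yields 128 unique components, for which the same
// rule would infer 2 * (128 - 1) = 254; this is why odd FFT lengths must be
// passed explicitly.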
46202
46203// Computes the sum along sparse segments of a tensor divided by the sqrt of N.
46204//
46205// N is the size of the segment being reduced.
46206//
46207// See `tf.sparse.segment_sum` for usage examples.
46208//
46209// Arguments:
46210//	data: A `Tensor` with data that will be assembled in the output.
46211//	indices: A 1-D tensor. Has same rank as `segment_ids`.
46213//	segment_ids: A 1-D tensor. Values should be sorted and can be repeated.
46214//
46215// Returns Has same shape as data, except for dimension 0 which
46216// has size `k`, the number of segments.
46217func SparseSegmentSqrtN(scope *Scope, data tf.Output, indices tf.Output, segment_ids tf.Output) (output tf.Output) {
46218	if scope.Err() != nil {
46219		return
46220	}
46221	opspec := tf.OpSpec{
46222		Type: "SparseSegmentSqrtN",
46223		Input: []tf.Input{
46224			data, indices, segment_ids,
46225		},
46226	}
46227	op := scope.AddOperation(opspec)
46228	return op.Output(0)
46229}
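
// A worked instance (editorial note): with data = [[1, 2], [3, 4]],
// indices = [0, 1] and segment_ids = [0, 0], both rows land in segment 0
// (N = 2), so:
//
//	output = [[(1+3)/sqrt(2), (2+4)/sqrt(2)]] ~= [[2.83, 4.24]]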
46230
46231// Computes square root of x element-wise.
46232//
46233// I.e., \\(y = \sqrt{x} = x^{1/2}\\).
46234func Sqrt(scope *Scope, x tf.Output) (y tf.Output) {
46235	if scope.Err() != nil {
46236		return
46237	}
46238	opspec := tf.OpSpec{
46239		Type: "Sqrt",
46240		Input: []tf.Input{
46241			x,
46242		},
46243	}
46244	op := scope.AddOperation(opspec)
46245	return op.Output(0)
46246}
46247
46248// Gradient op for `MirrorPad` op. This op folds a mirror-padded tensor.
46249//
46250// This operation folds the padded areas of `input` by `MirrorPad` according to the
46251// `paddings` you specify. `paddings` must be the same as `paddings` argument
46252// given to the corresponding `MirrorPad` op.
46253//
46254// The folded size of each dimension D of the output is:
46255//
46256// `input.dim_size(D) - paddings(D, 0) - paddings(D, 1)`
46257//
46258// For example:
46259//
46260// ```
46261// # 't' is [[1, 2, 3], [4, 5, 6], [7, 8, 9]].
46262// # 'paddings' is [[0, 1], [0, 1]].
46263// # 'mode' is SYMMETRIC.
46264// # rank of 't' is 2.
46265// pad(t, paddings) ==> [[ 1,  5]
46266//                       [11, 28]]
46267// ```
46268//
46269// Arguments:
46270//	input: The input tensor to be folded.
46271//	paddings: A two-column matrix specifying the padding sizes. The number of
46272// rows must be the same as the rank of `input`.
46273//	mode: The mode used in the `MirrorPad` op.
46274//
46275// Returns The folded tensor.
46276func MirrorPadGrad(scope *Scope, input tf.Output, paddings tf.Output, mode string) (output tf.Output) {
46277	if scope.Err() != nil {
46278		return
46279	}
46280	attrs := map[string]interface{}{"mode": mode}
46281	opspec := tf.OpSpec{
46282		Type: "MirrorPadGrad",
46283		Input: []tf.Input{
46284			input, paddings,
46285		},
46286		Attrs: attrs,
46287	}
46288	op := scope.AddOperation(opspec)
46289	return op.Output(0)
46290}
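
// How the folded values above arise (editorial note): the padded row and
// column of t mirror row 1 and column 1 of the output, so their gradients
// are added back:
//
//	out[0][0] = 1
//	out[0][1] = 2 + 3         = 5
//	out[1][0] = 4 + 7         = 11
//	out[1][1] = 5 + 6 + 8 + 9 = 28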
46291
46292// Produces the max pool of the input tensor for quantized types.
46293//
46294// Arguments:
46295//	input: The 4D (batch x rows x cols x depth) Tensor to MaxReduce over.
46296//	min_input: The float value that the lowest quantized input value represents.
46297//	max_input: The float value that the highest quantized input value represents.
46298//	ksize: The size of the window for each dimension of the input tensor.
46299// The length must be 4 to match the number of dimensions of the input.
46300//	strides: The stride of the sliding window for each dimension of the input
46301// tensor. The length must be 4 to match the number of dimensions of the input.
46302//	padding: The type of padding algorithm to use.
46303//
46304// Returns:
46305//	output
46306//	min_output: The float value that the lowest quantized output value represents.
46307//	max_output: The float value that the highest quantized output value represents.
46308func QuantizedMaxPool(scope *Scope, input tf.Output, min_input tf.Output, max_input tf.Output, ksize []int64, strides []int64, padding string) (output tf.Output, min_output tf.Output, max_output tf.Output) {
46309	if scope.Err() != nil {
46310		return
46311	}
46312	attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
46313	opspec := tf.OpSpec{
46314		Type: "QuantizedMaxPool",
46315		Input: []tf.Input{
46316			input, min_input, max_input,
46317		},
46318		Attrs: attrs,
46319	}
46320	op := scope.AddOperation(opspec)
46321	return op.Output(0), op.Output(1), op.Output(2)
46322}
46323
46324// ResourceApplyAdagradAttr is an optional argument to ResourceApplyAdagrad.
46325type ResourceApplyAdagradAttr func(optionalAttr)
46326
46327// ResourceApplyAdagradUseLocking sets the optional use_locking attribute to value.
46328//
46329// value: If `True`, updating of the var and accum tensors will be protected
46330// by a lock; otherwise the behavior is undefined, but may exhibit less
46331// contention.
46332// If not specified, defaults to false
46333func ResourceApplyAdagradUseLocking(value bool) ResourceApplyAdagradAttr {
46334	return func(m optionalAttr) {
46335		m["use_locking"] = value
46336	}
46337}
46338
46339// ResourceApplyAdagradUpdateSlots sets the optional update_slots attribute to value.
46340// If not specified, defaults to true
46341func ResourceApplyAdagradUpdateSlots(value bool) ResourceApplyAdagradAttr {
46342	return func(m optionalAttr) {
46343		m["update_slots"] = value
46344	}
46345}
46346
46347// Update '*var' according to the adagrad scheme.
46348//
46349// accum += grad * grad
46350// var -= lr * grad * (1 / sqrt(accum))
46351//
46352// Arguments:
46353//	var_: Should be from a Variable().
46354//	accum: Should be from a Variable().
46355//	lr: Scaling factor. Must be a scalar.
46356//	grad: The gradient.
46357//
46358// Returns the created operation.
46359func ResourceApplyAdagrad(scope *Scope, var_ tf.Output, accum tf.Output, lr tf.Output, grad tf.Output, optional ...ResourceApplyAdagradAttr) (o *tf.Operation) {
46360	if scope.Err() != nil {
46361		return
46362	}
46363	attrs := map[string]interface{}{}
46364	for _, a := range optional {
46365		a(attrs)
46366	}
46367	opspec := tf.OpSpec{
46368		Type: "ResourceApplyAdagrad",
46369		Input: []tf.Input{
46370			var_, accum, lr, grad,
46371		},
46372		Attrs: attrs,
46373	}
46374	return scope.AddOperation(opspec)
46375}
46376
46377// FakeQuantWithMinMaxArgsAttr is an optional argument to FakeQuantWithMinMaxArgs.
46378type FakeQuantWithMinMaxArgsAttr func(optionalAttr)
46379
46380// FakeQuantWithMinMaxArgsMin sets the optional min attribute to value.
46381// If not specified, defaults to -6
46382func FakeQuantWithMinMaxArgsMin(value float32) FakeQuantWithMinMaxArgsAttr {
46383	return func(m optionalAttr) {
46384		m["min"] = value
46385	}
46386}
46387
46388// FakeQuantWithMinMaxArgsMax sets the optional max attribute to value.
46389// If not specified, defaults to 6
46390func FakeQuantWithMinMaxArgsMax(value float32) FakeQuantWithMinMaxArgsAttr {
46391	return func(m optionalAttr) {
46392		m["max"] = value
46393	}
46394}
46395
46396// FakeQuantWithMinMaxArgsNumBits sets the optional num_bits attribute to value.
46397// If not specified, defaults to 8
46398func FakeQuantWithMinMaxArgsNumBits(value int64) FakeQuantWithMinMaxArgsAttr {
46399	return func(m optionalAttr) {
46400		m["num_bits"] = value
46401	}
46402}
46403
46404// FakeQuantWithMinMaxArgsNarrowRange sets the optional narrow_range attribute to value.
46405// If not specified, defaults to false
46406func FakeQuantWithMinMaxArgsNarrowRange(value bool) FakeQuantWithMinMaxArgsAttr {
46407	return func(m optionalAttr) {
46408		m["narrow_range"] = value
46409	}
46410}
46411
46412// Fake-quantize the 'inputs' tensor, type float to 'outputs' tensor of same type.
46413//
46414// Attributes
46415//
46416// *   `[min; max]` define the clamping range for the `inputs` data.
46417// *   `inputs` values are quantized into the quantization range (
46418// `[0; 2^num_bits - 1]` when `narrow_range` is false and `[1; 2^num_bits - 1]`
46419// when it is true) and then de-quantized and output as floats in the
46420// `[min; max]` interval.
46421// *   `num_bits` is the bitwidth of the quantization; between 2 and 16, inclusive.
46422//
46423// Before quantization, `min` and `max` values are adjusted with the following
46424// logic.
46425// It is suggested to have `min <= 0 <= max`. If `0` is not in the range of values,
46426// the behavior can be unexpected:
46427//
46428// *   If `0 < min < max`: `min_adj = 0` and `max_adj = max - min`.
46429// *   If `min < max < 0`: `min_adj = min - max` and `max_adj = 0`.
46430// *   If `min <= 0 <= max`: `scale = (max - min) / (2^num_bits - 1) `,
46431// `min_adj = scale * round(min / scale)` and `max_adj = max + min_adj - min`.
46432//
46433// Quantization is called fake since the output is still in floating point.
46434func FakeQuantWithMinMaxArgs(scope *Scope, inputs tf.Output, optional ...FakeQuantWithMinMaxArgsAttr) (outputs tf.Output) {
46435	if scope.Err() != nil {
46436		return
46437	}
46438	attrs := map[string]interface{}{}
46439	for _, a := range optional {
46440		a(attrs)
46441	}
46442	opspec := tf.OpSpec{
46443		Type: "FakeQuantWithMinMaxArgs",
46444		Input: []tf.Input{
46445			inputs,
46446		},
46447		Attrs: attrs,
46448	}
46449	op := scope.AddOperation(opspec)
46450	return op.Output(0)
46451}
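
// A worked instance of the range adjustment above (editorial note): with
// min = 1 and max = 5 (the 0 < min < max case), the range is shifted to
// include zero: min_adj = 0 and max_adj = max - min = 4.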
46452
46453// Batch normalization.
46454//
46455// DEPRECATED at GraphDef version 9: this op is deprecated; use
46456// tf.nn.batch_normalization() instead.
46458//
46459// Arguments:
46460//	t: A 4D input Tensor.
46461//	m: A 1D mean Tensor with size matching the last dimension of t.
46462// This is the first output from tf.nn.moments,
46463// or a saved moving average thereof.
46464//	v: A 1D variance Tensor with size matching the last dimension of t.
46465// This is the second output from tf.nn.moments,
46466// or a saved moving average thereof.
46467//	beta: A 1D beta Tensor with size matching the last dimension of t.
46468// An offset to be added to the normalized tensor.
46469//	gamma: A 1D gamma Tensor with size matching the last dimension of t.
46470// If "scale_after_normalization" is true, this tensor will be multiplied
46471// with the normalized tensor.
46472//	variance_epsilon: A small float number to avoid dividing by 0.
46473//	scale_after_normalization: A bool indicating whether the resulted tensor
46474// needs to be multiplied with gamma.
46475func BatchNormWithGlobalNormalization(scope *Scope, t tf.Output, m tf.Output, v tf.Output, beta tf.Output, gamma tf.Output, variance_epsilon float32, scale_after_normalization bool) (result tf.Output) {
46476	if scope.Err() != nil {
46477		return
46478	}
46479	attrs := map[string]interface{}{"variance_epsilon": variance_epsilon, "scale_after_normalization": scale_after_normalization}
46480	opspec := tf.OpSpec{
46481		Type: "BatchNormWithGlobalNormalization",
46482		Input: []tf.Input{
46483			t, m, v, beta, gamma,
46484		},
46485		Attrs: attrs,
46486	}
46487	op := scope.AddOperation(opspec)
46488	return op.Output(0)
46489}
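
// In terms of the arguments above, the op computes (editorial note, following
// the tf.nn.batch_norm_with_global_normalization semantics):
//
//	result = (t - m) / sqrt(v + variance_epsilon) * gamma + beta
//
// with the `* gamma` factor applied only when scale_after_normalization is true.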
46490
46491// EncodeBase64Attr is an optional argument to EncodeBase64.
46492type EncodeBase64Attr func(optionalAttr)
46493
46494// EncodeBase64Pad sets the optional pad attribute to value.
46495//
46496// value: Bool whether padding is applied at the ends.
46497// If not specified, defaults to false
46498func EncodeBase64Pad(value bool) EncodeBase64Attr {
46499	return func(m optionalAttr) {
46500		m["pad"] = value
46501	}
46502}
46503
46504// Encode strings into web-safe base64 format.
46505//
46506// Refer to the following article for more information on base64 format:
46507// en.wikipedia.org/wiki/Base64. Base64 strings may have padding with '=' at the
46508// end so that the encoded string has a length that is a multiple of 4. See the
46509// Padding section of the link above.
46510//
46511// Web-safe means that the encoder uses - and _ instead of + and /.
46512//
46513// Arguments:
46514//	input: Strings to be encoded.
46515//
46516// Returns Input strings encoded in base64.
46517func EncodeBase64(scope *Scope, input tf.Output, optional ...EncodeBase64Attr) (output tf.Output) {
46518	if scope.Err() != nil {
46519		return
46520	}
46521	attrs := map[string]interface{}{}
46522	for _, a := range optional {
46523		a(attrs)
46524	}
46525	opspec := tf.OpSpec{
46526		Type: "EncodeBase64",
46527		Input: []tf.Input{
46528			input,
46529		},
46530		Attrs: attrs,
46531	}
46532	op := scope.AddOperation(opspec)
46533	return op.Output(0)
46534}
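
// For example (editorial note): encoding "Hello" yields "SGVsbG8" with the
// default pad=false, and "SGVsbG8=" when EncodeBase64Pad(true) is supplied.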
46535
46536// ResourceApplyAdagradV2Attr is an optional argument to ResourceApplyAdagradV2.
46537type ResourceApplyAdagradV2Attr func(optionalAttr)
46538
46539// ResourceApplyAdagradV2UseLocking sets the optional use_locking attribute to value.
46540//
46541// value: If `True`, updating of the var and accum tensors will be protected
46542// by a lock; otherwise the behavior is undefined, but may exhibit less
46543// contention.
46544// If not specified, defaults to false
46545func ResourceApplyAdagradV2UseLocking(value bool) ResourceApplyAdagradV2Attr {
46546	return func(m optionalAttr) {
46547		m["use_locking"] = value
46548	}
46549}
46550
46551// ResourceApplyAdagradV2UpdateSlots sets the optional update_slots attribute to value.
46552// If not specified, defaults to true
46553func ResourceApplyAdagradV2UpdateSlots(value bool) ResourceApplyAdagradV2Attr {
46554	return func(m optionalAttr) {
46555		m["update_slots"] = value
46556	}
46557}
46558
46559// Update '*var' according to the adagrad scheme.
46560//
46561// accum += grad * grad
46562// var -= lr * grad * (1 / (sqrt(accum) + epsilon))
46563//
46564// Arguments:
46565//	var_: Should be from a Variable().
46566//	accum: Should be from a Variable().
46567//	lr: Scaling factor. Must be a scalar.
46568//	epsilon: Constant factor. Must be a scalar.
46569//	grad: The gradient.
46570//
46571// Returns the created operation.
46572func ResourceApplyAdagradV2(scope *Scope, var_ tf.Output, accum tf.Output, lr tf.Output, epsilon tf.Output, grad tf.Output, optional ...ResourceApplyAdagradV2Attr) (o *tf.Operation) {
46573	if scope.Err() != nil {
46574		return
46575	}
46576	attrs := map[string]interface{}{}
46577	for _, a := range optional {
46578		a(attrs)
46579	}
46580	opspec := tf.OpSpec{
46581		Type: "ResourceApplyAdagradV2",
46582		Input: []tf.Input{
46583			var_, accum, lr, epsilon, grad,
46584		},
46585		Attrs: attrs,
46586	}
46587	return scope.AddOperation(opspec)
46588}
46589
46590// Op that loads and executes a TPU program on a TPU device.
46591//
46592// For the internal use of the distributed TPU compiler.
46593func TPUExecute(scope *Scope, args []tf.Output, key tf.Output, Tresults []tf.DataType) (results []tf.Output) {
46594	if scope.Err() != nil {
46595		return
46596	}
46597	attrs := map[string]interface{}{"Tresults": Tresults}
46598	opspec := tf.OpSpec{
46599		Type: "TPUExecute",
46600		Input: []tf.Input{
46601			tf.OutputList(args), key,
46602		},
46603		Attrs: attrs,
46604	}
46605	op := scope.AddOperation(opspec)
46606	if scope.Err() != nil {
46607		return
46608	}
46609	var idx int
46610	var err error
46611	if results, idx, err = makeOutputList(op, idx, "results"); err != nil {
46612		scope.UpdateErr("TPUExecute", err)
46613		return
46614	}
46615	return results
46616}
46617
46618// Creates a dataset that batches input elements into a SparseTensor.
46619//
46620// Arguments:
46621//	input_dataset: A handle to an input dataset. Must have a single component.
46622//	batch_size: A scalar representing the number of elements to accumulate in a
46623// batch.
46624//	row_shape: A vector representing the dense shape of each row in the produced
46625// SparseTensor. The shape may be partially specified, using `-1` to indicate
46626// that a particular dimension should use the maximum size of all batch elements.
46629func DenseToSparseBatchDataset(scope *Scope, input_dataset tf.Output, batch_size tf.Output, row_shape tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
46630	if scope.Err() != nil {
46631		return
46632	}
46633	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
46634	opspec := tf.OpSpec{
46635		Type: "DenseToSparseBatchDataset",
46636		Input: []tf.Input{
46637			input_dataset, batch_size, row_shape,
46638		},
46639		Attrs: attrs,
46640	}
46641	op := scope.AddOperation(opspec)
46642	return op.Output(0)
46643}
46644
46645// LoadTPUEmbeddingAdadeltaParametersGradAccumDebugAttr is an optional argument to LoadTPUEmbeddingAdadeltaParametersGradAccumDebug.
46646type LoadTPUEmbeddingAdadeltaParametersGradAccumDebugAttr func(optionalAttr)
46647
46648// LoadTPUEmbeddingAdadeltaParametersGradAccumDebugTableId sets the optional table_id attribute to value.
46649// If not specified, defaults to -1
46650func LoadTPUEmbeddingAdadeltaParametersGradAccumDebugTableId(value int64) LoadTPUEmbeddingAdadeltaParametersGradAccumDebugAttr {
46651	return func(m optionalAttr) {
46652		m["table_id"] = value
46653	}
46654}
46655
46656// LoadTPUEmbeddingAdadeltaParametersGradAccumDebugTableName sets the optional table_name attribute to value.
46657// If not specified, defaults to ""
46658func LoadTPUEmbeddingAdadeltaParametersGradAccumDebugTableName(value string) LoadTPUEmbeddingAdadeltaParametersGradAccumDebugAttr {
46659	return func(m optionalAttr) {
46660		m["table_name"] = value
46661	}
46662}
46663
46664// LoadTPUEmbeddingAdadeltaParametersGradAccumDebugConfig sets the optional config attribute to value.
46665// If not specified, defaults to ""
46666func LoadTPUEmbeddingAdadeltaParametersGradAccumDebugConfig(value string) LoadTPUEmbeddingAdadeltaParametersGradAccumDebugAttr {
46667	return func(m optionalAttr) {
46668		m["config"] = value
46669	}
46670}
46671
46672// Load Adadelta parameters with debug support.
46673//
46674// An op that loads optimization parameters into HBM for embedding. Must be
46675// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
46676// embedding table configuration. For example, this op is used to install
46677// parameters that are loaded from a checkpoint before a training loop is
46678// executed.
46679//
46680// Arguments:
46681//	parameters: Value of parameters used in the Adadelta optimization algorithm.
46682//	accumulators: Value of accumulators used in the Adadelta optimization algorithm.
46683//	updates: Value of updates used in the Adadelta optimization algorithm.
46684//	gradient_accumulators: Value of gradient_accumulators used in the Adadelta optimization algorithm.
46685//
46686//
46687//
46688// Returns the created operation.
46689func LoadTPUEmbeddingAdadeltaParametersGradAccumDebug(scope *Scope, parameters tf.Output, accumulators tf.Output, updates tf.Output, gradient_accumulators tf.Output, num_shards int64, shard_id int64, optional ...LoadTPUEmbeddingAdadeltaParametersGradAccumDebugAttr) (o *tf.Operation) {
46690	if scope.Err() != nil {
46691		return
46692	}
46693	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
46694	for _, a := range optional {
46695		a(attrs)
46696	}
46697	opspec := tf.OpSpec{
46698		Type: "LoadTPUEmbeddingAdadeltaParametersGradAccumDebug",
46699		Input: []tf.Input{
46700			parameters, accumulators, updates, gradient_accumulators,
46701		},
46702		Attrs: attrs,
46703	}
46704	return scope.AddOperation(opspec)
46705}
46706
46707// DepthwiseConv2dNativeAttr is an optional argument to DepthwiseConv2dNative.
46708type DepthwiseConv2dNativeAttr func(optionalAttr)
46709
46710// DepthwiseConv2dNativeExplicitPaddings sets the optional explicit_paddings attribute to value.
46711// If not specified, defaults to <>
46712func DepthwiseConv2dNativeExplicitPaddings(value []int64) DepthwiseConv2dNativeAttr {
46713	return func(m optionalAttr) {
46714		m["explicit_paddings"] = value
46715	}
46716}
46717
46718// DepthwiseConv2dNativeDataFormat sets the optional data_format attribute to value.
46719//
46720// value: Specify the data format of the input and output data. With the
46721// default format "NHWC", the data is stored in the order of:
46722//     [batch, height, width, channels].
46723// Alternatively, the format could be "NCHW", the data storage order of:
46724//     [batch, channels, height, width].
46725// If not specified, defaults to "NHWC"
46726func DepthwiseConv2dNativeDataFormat(value string) DepthwiseConv2dNativeAttr {
46727	return func(m optionalAttr) {
46728		m["data_format"] = value
46729	}
46730}
46731
46732// DepthwiseConv2dNativeDilations sets the optional dilations attribute to value.
46733//
46734// value: 1-D tensor of length 4.  The dilation factor for each dimension of
46735// `input`. If set to k > 1, there will be k-1 skipped cells between each filter
46736// element on that dimension. The dimension order is determined by the value of
46737// `data_format`, see above for details. Dilations in the batch and depth
46738// dimensions must be 1.
46739// If not specified, defaults to <i:1 i:1 i:1 i:1 >
46740func DepthwiseConv2dNativeDilations(value []int64) DepthwiseConv2dNativeAttr {
46741	return func(m optionalAttr) {
46742		m["dilations"] = value
46743	}
46744}
46745
46746// Computes a 2-D depthwise convolution given 4-D `input` and `filter` tensors.
46747//
46748// Given an input tensor of shape `[batch, in_height, in_width, in_channels]`
46749// and a filter / kernel tensor of shape
46750// `[filter_height, filter_width, in_channels, channel_multiplier]`, containing
46751// `in_channels` convolutional filters of depth 1, `depthwise_conv2d` applies
46752// a different filter to each input channel (expanding from 1 channel to
46753// `channel_multiplier` channels for each), then concatenates the results
46754// together. Thus, the output has `in_channels * channel_multiplier` channels.
46755//
46756// ```
46757// for k in 0..in_channels-1
46758//   for q in 0..channel_multiplier-1
46759//     output[b, i, j, k * channel_multiplier + q] =
46760//       sum_{di, dj} input[b, strides[1] * i + di, strides[2] * j + dj, k] *
46761//                         filter[di, dj, k, q]
46762// ```
46763//
46764// Must have `strides[0] = strides[3] = 1`.  For the most common case of the same
46765// horizontal and vertical strides, `strides = [1, stride, stride, 1]`.
46766//
46767// Arguments:
46768//
46769//
46770//	strides: 1-D of length 4.  The stride of the sliding window for each dimension
46771// of `input`.
46772//	padding: The type of padding algorithm to use.
46773func DepthwiseConv2dNative(scope *Scope, input tf.Output, filter tf.Output, strides []int64, padding string, optional ...DepthwiseConv2dNativeAttr) (output tf.Output) {
46774	if scope.Err() != nil {
46775		return
46776	}
46777	attrs := map[string]interface{}{"strides": strides, "padding": padding}
46778	for _, a := range optional {
46779		a(attrs)
46780	}
46781	opspec := tf.OpSpec{
46782		Type: "DepthwiseConv2dNative",
46783		Input: []tf.Input{
46784			input, filter,
46785		},
46786		Attrs: attrs,
46787	}
46788	op := scope.AddOperation(opspec)
46789	return op.Output(0)
46790}
46791
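// A plain-Go reference of the loop documented above, restricted to batch 1,
// VALID padding, and stride 1 (illustrative only; not part of the generated
// API). Output channel k*mult+q pairs input channel k with filter slice
// [:, :, k, q]:
//
// ```
// package main
//
// import "fmt"
//
// // in is [h][w][inC]; f is [fh][fw][inC][mult].
// func depthwise(in [][][]float64, f [][][][]float64) [][][]float64 {
// 	h, w, inC := len(in), len(in[0]), len(in[0][0])
// 	fh, fw, mult := len(f), len(f[0]), len(f[0][0][0])
// 	out := make([][][]float64, h-fh+1)
// 	for i := range out {
// 		out[i] = make([][]float64, w-fw+1)
// 		for j := range out[i] {
// 			out[i][j] = make([]float64, inC*mult)
// 			for k := 0; k < inC; k++ {
// 				for q := 0; q < mult; q++ {
// 					sum := 0.0
// 					for di := 0; di < fh; di++ {
// 						for dj := 0; dj < fw; dj++ {
// 							sum += in[i+di][j+dj][k] * f[di][dj][k][q]
// 						}
// 					}
// 					out[i][j][k*mult+q] = sum
// 				}
// 			}
// 		}
// 	}
// 	return out
// }
//
// func main() {
// 	in := [][][]float64{{{1}, {2}}, {{3}, {4}}} // 2x2 image, 1 channel
// 	f := [][][][]float64{{{{1, 10}}}}           // 1x1 filter, multiplier 2
// 	fmt.Println(depthwise(in, f))               // [[[1 10] [2 20]] [[3 30] [4 40]]]
// }
// ```
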
46792// Creates an all-zeros CSRSparseMatrix with shape `dense_shape`.
46793//
46794// Arguments:
46795//	dense_shape: The desired matrix shape.
46796//
46797//
46798// Returns An empty CSR matrix with shape `dense_shape`.
46799func SparseMatrixZeros(scope *Scope, dense_shape tf.Output, type_ tf.DataType) (sparse_matrix tf.Output) {
46800	if scope.Err() != nil {
46801		return
46802	}
46803	attrs := map[string]interface{}{"type": type_}
46804	opspec := tf.OpSpec{
46805		Type: "SparseMatrixZeros",
46806		Input: []tf.Input{
46807			dense_shape,
46808		},
46809		Attrs: attrs,
46810	}
46811	op := scope.AddOperation(opspec)
46812	return op.Output(0)
46813}
46814
46815// EqualAttr is an optional argument to Equal.
46816type EqualAttr func(optionalAttr)
46817
46818// EqualIncompatibleShapeError sets the optional incompatible_shape_error attribute to value.
46819// If not specified, defaults to true
46820func EqualIncompatibleShapeError(value bool) EqualAttr {
46821	return func(m optionalAttr) {
46822		m["incompatible_shape_error"] = value
46823	}
46824}
46825
46826// Returns the truth value of (x == y) element-wise.
46827//
46828// *NOTE*: `Equal` supports broadcasting. More about broadcasting
46829// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
46830//
46831// ```python
46832// x = tf.constant([2, 4])
46833// y = tf.constant(2)
46834// tf.math.equal(x, y) ==> array([True, False])
46835//
46836// x = tf.constant([2, 4])
46837// y = tf.constant([2, 4])
46838// tf.math.equal(x, y) ==> array([True,  True])
46839// ```
46840func Equal(scope *Scope, x tf.Output, y tf.Output, optional ...EqualAttr) (z tf.Output) {
46841	if scope.Err() != nil {
46842		return
46843	}
46844	attrs := map[string]interface{}{}
46845	for _, a := range optional {
46846		a(attrs)
46847	}
46848	opspec := tf.OpSpec{
46849		Type: "Equal",
46850		Input: []tf.Input{
46851			x, y,
46852		},
46853		Attrs: attrs,
46854	}
46855	op := scope.AddOperation(opspec)
46856	return op.Output(0)
46857}
46858
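// A minimal client-side sketch of building and running a graph with Equal
// (assumes the tf and op packages from this module; error handling trimmed
// to panics for brevity):
//
// ```
// package main
//
// import (
// 	"fmt"
//
// 	tf "github.com/tensorflow/tensorflow/tensorflow/go"
// 	"github.com/tensorflow/tensorflow/tensorflow/go/op"
// )
//
// func main() {
// 	s := op.NewScope()
// 	x := op.Const(s.SubScope("x"), []int32{2, 4})
// 	y := op.Const(s.SubScope("y"), int32(2)) // scalar, broadcast against x
// 	z := op.Equal(s, x, y)
// 	graph, err := s.Finalize()
// 	if err != nil {
// 		panic(err)
// 	}
// 	sess, err := tf.NewSession(graph, nil)
// 	if err != nil {
// 		panic(err)
// 	}
// 	out, err := sess.Run(nil, []tf.Output{z}, nil)
// 	if err != nil {
// 		panic(err)
// 	}
// 	fmt.Println(out[0].Value()) // [true false]
// }
// ```
//
// Optional attributes use the functional-option pattern, e.g.
// op.Equal(s, x, y, op.EqualIncompatibleShapeError(false)).
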
46859// SparseToSparseSetOperationAttr is an optional argument to SparseToSparseSetOperation.
46860type SparseToSparseSetOperationAttr func(optionalAttr)
46861
46862// SparseToSparseSetOperationValidateIndices sets the optional validate_indices attribute to value.
46863// If not specified, defaults to true
46864func SparseToSparseSetOperationValidateIndices(value bool) SparseToSparseSetOperationAttr {
46865	return func(m optionalAttr) {
46866		m["validate_indices"] = value
46867	}
46868}
46869
46870// Applies set operation along last dimension of 2 `SparseTensor` inputs.
46871//
46872// See SetOperationOp::SetOperationFromContext for values of `set_operation`.
46873//
46874// If `validate_indices` is `True`, `SparseToSparseSetOperation` validates the
46875// order and range of `set1` and `set2` indices.
46876//
46877// Input `set1` is a `SparseTensor` represented by `set1_indices`, `set1_values`,
46878// and `set1_shape`. For `set1` ranked `n`, 1st `n-1` dimensions must be the same
46879// as `set2`. Dimension `n` contains values in a set, duplicates are allowed but
46880// ignored.
46881//
46882// Input `set2` is a `SparseTensor` represented by `set2_indices`, `set2_values`,
46883// and `set2_shape`. For `set2` ranked `n`, 1st `n-1` dimensions must be the same
46884// as `set1`. Dimension `n` contains values in a set, duplicates are allowed but
46885// ignored.
46886//
46890// Output `result` is a `SparseTensor` represented by `result_indices`,
46891// `result_values`, and `result_shape`. For `set1` and `set2` ranked `n`, this
46892// has rank `n` and the same 1st `n-1` dimensions as `set1` and `set2`. The `nth`
46893// dimension contains the result of `set_operation` applied to the corresponding
46894// `[0...n-1]` dimension of `set`.
46895//
46896// Arguments:
46897//	set1_indices: 2D `Tensor`, indices of a `SparseTensor`. Must be in row-major
46898// order.
46899//	set1_values: 1D `Tensor`, values of a `SparseTensor`. Must be in row-major
46900// order.
46901//	set1_shape: 1D `Tensor`, shape of a `SparseTensor`. `set1_shape[0...n-1]` must
46902// be the same as `set2_shape[0...n-1]`, `set1_shape[n]` is the
46903// max set size across `0...n-1` dimensions.
46904//	set2_indices: 2D `Tensor`, indices of a `SparseTensor`. Must be in row-major
46905// order.
46906//	set2_values: 1D `Tensor`, values of a `SparseTensor`. Must be in row-major
46907// order.
46908//	set2_shape: 1D `Tensor`, shape of a `SparseTensor`. `set2_shape[0...n-1]` must
46909// be the same as `set1_shape[0...n-1]`, `set2_shape[n]` is the
46910// max set size across `0...n-1` dimensions.
46911//
46912//
46913// Returns:
46914//	result_indices: 2D indices of a `SparseTensor`.
46915//	result_values: 1D values of a `SparseTensor`.
46916//	result_shape: 1D `Tensor` shape of a `SparseTensor`. `result_shape[0...n-1]` is
46917// the same as the 1st `n-1` dimensions of `set1` and `set2`, `result_shape[n]`
46918// is the max result set size across all `0...n-1` dimensions.
46919func SparseToSparseSetOperation(scope *Scope, set1_indices tf.Output, set1_values tf.Output, set1_shape tf.Output, set2_indices tf.Output, set2_values tf.Output, set2_shape tf.Output, set_operation string, optional ...SparseToSparseSetOperationAttr) (result_indices tf.Output, result_values tf.Output, result_shape tf.Output) {
46920	if scope.Err() != nil {
46921		return
46922	}
46923	attrs := map[string]interface{}{"set_operation": set_operation}
46924	for _, a := range optional {
46925		a(attrs)
46926	}
46927	opspec := tf.OpSpec{
46928		Type: "SparseToSparseSetOperation",
46929		Input: []tf.Input{
46930			set1_indices, set1_values, set1_shape, set2_indices, set2_values, set2_shape,
46931		},
46932		Attrs: attrs,
46933	}
46934	op := scope.AddOperation(opspec)
46935	return op.Output(0), op.Output(1), op.Output(2)
46936}
46937
46938// MaxPoolGradGradV2Attr is an optional argument to MaxPoolGradGradV2.
46939type MaxPoolGradGradV2Attr func(optionalAttr)
46940
46941// MaxPoolGradGradV2DataFormat sets the optional data_format attribute to value.
46942//
46943// value: Specify the data format of the input and output data. With the
46944// default format "NHWC", the data is stored in the order of:
46945//     [batch, in_height, in_width, in_channels].
46946// Alternatively, the format could be "NCHW", the data storage order of:
46947//     [batch, in_channels, in_height, in_width].
46948// If not specified, defaults to "NHWC"
46949func MaxPoolGradGradV2DataFormat(value string) MaxPoolGradGradV2Attr {
46950	return func(m optionalAttr) {
46951		m["data_format"] = value
46952	}
46953}
46954
46955// Computes second-order gradients of the maxpooling function.
46956//
46957// Arguments:
46958//	orig_input: The original input tensor.
46959//	orig_output: The original output tensor.
46960//	grad: 4-D.  Gradients of gradients w.r.t. the input of `max_pool`.
46961//	ksize: The size of the window for each dimension of the input tensor.
46962//	strides: The stride of the sliding window for each dimension of the
46963// input tensor.
46964//	padding: The type of padding algorithm to use.
46965//
46966// Returns Gradients of gradients w.r.t. the input to `max_pool`.
46967func MaxPoolGradGradV2(scope *Scope, orig_input tf.Output, orig_output tf.Output, grad tf.Output, ksize tf.Output, strides tf.Output, padding string, optional ...MaxPoolGradGradV2Attr) (output tf.Output) {
46968	if scope.Err() != nil {
46969		return
46970	}
46971	attrs := map[string]interface{}{"padding": padding}
46972	for _, a := range optional {
46973		a(attrs)
46974	}
46975	opspec := tf.OpSpec{
46976		Type: "MaxPoolGradGradV2",
46977		Input: []tf.Input{
46978			orig_input, orig_output, grad, ksize, strides,
46979		},
46980		Attrs: attrs,
46981	}
46982	op := scope.AddOperation(opspec)
46983	return op.Output(0)
46984}
46985
46986// An op to send a tensor to the host.
46987//
46988// input: the tensor that will be sent to the host.
46989// Tinput: element type for input.
46990// key: A unique identifier for this region used to match up host transfers.
46991//
46992// Returns the created operation.
46993func XlaSendToHost(scope *Scope, input tf.Output, key string) (o *tf.Operation) {
46994	if scope.Err() != nil {
46995		return
46996	}
46997	attrs := map[string]interface{}{"key": key}
46998	opspec := tf.OpSpec{
46999		Type: "XlaSendToHost",
47000		Input: []tf.Input{
47001			input,
47002		},
47003		Attrs: attrs,
47004	}
47005	return scope.AddOperation(opspec)
47006}
47007
47008// ResourceSparseApplyRMSPropAttr is an optional argument to ResourceSparseApplyRMSProp.
47009type ResourceSparseApplyRMSPropAttr func(optionalAttr)
47010
47011// ResourceSparseApplyRMSPropUseLocking sets the optional use_locking attribute to value.
47012//
47013// value: If `True`, updating of the var, ms, and mom tensors is protected
47014// by a lock; otherwise the behavior is undefined, but may exhibit less
47015// contention.
47016// If not specified, defaults to false
47017func ResourceSparseApplyRMSPropUseLocking(value bool) ResourceSparseApplyRMSPropAttr {
47018	return func(m optionalAttr) {
47019		m["use_locking"] = value
47020	}
47021}
47022
47023// Update '*var' according to the RMSProp algorithm.
47024//
47025// Note that in dense implementation of this algorithm, ms and mom will
47026// update even if the grad is zero, but in this sparse implementation, ms
47027// and mom will not update in iterations during which the grad is zero.
47028//
47029// mean_square = decay * mean_square + (1-decay) * gradient ** 2
47030// Delta = learning_rate * gradient / sqrt(mean_square + epsilon)
47031//
47032// ms <- rho * ms_{t-1} + (1-rho) * grad * grad
47033// mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)
47034// var <- var - mom
47035//
47036// Arguments:
47037//	var_: Should be from a Variable().
47038//	ms: Should be from a Variable().
47039//	mom: Should be from a Variable().
47040//	lr: Scaling factor. Must be a scalar.
47041//	rho: Decay rate. Must be a scalar.
47042//
47043//	epsilon: Ridge term. Must be a scalar.
47044//	grad: The gradient.
47045//	indices: A vector of indices into the first dimension of var, ms and mom.
47046//
47047// Returns the created operation.
47048func ResourceSparseApplyRMSProp(scope *Scope, var_ tf.Output, ms tf.Output, mom tf.Output, lr tf.Output, rho tf.Output, momentum tf.Output, epsilon tf.Output, grad tf.Output, indices tf.Output, optional ...ResourceSparseApplyRMSPropAttr) (o *tf.Operation) {
47049	if scope.Err() != nil {
47050		return
47051	}
47052	attrs := map[string]interface{}{}
47053	for _, a := range optional {
47054		a(attrs)
47055	}
47056	opspec := tf.OpSpec{
47057		Type: "ResourceSparseApplyRMSProp",
47058		Input: []tf.Input{
47059			var_, ms, mom, lr, rho, momentum, epsilon, grad, indices,
47060		},
47061		Attrs: attrs,
47062	}
47063	return scope.AddOperation(opspec)
47064}
47065
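// The dense form of the update above, written out in plain Go for one scalar
// slot (illustrative values; the sparse op applies the same arithmetic only
// to the rows named in `indices`):
//
// ```
// package main
//
// import (
// 	"fmt"
// 	"math"
// )
//
// func rmspropStep(v, ms, mom, lr, rho, momentum, epsilon, grad float64) (float64, float64, float64) {
// 	ms = rho*ms + (1-rho)*grad*grad
// 	mom = momentum*mom + lr*grad/math.Sqrt(ms+epsilon)
// 	v -= mom
// 	return v, ms, mom
// }
//
// func main() {
// 	v, ms, mom := rmspropStep(1.0, 0, 0, 0.01, 0.9, 0.9, 1e-10, 0.5)
// 	fmt.Println(v, ms, mom)
// }
// ```
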
47066// RetrieveTPUEmbeddingMomentumParametersAttr is an optional argument to RetrieveTPUEmbeddingMomentumParameters.
47067type RetrieveTPUEmbeddingMomentumParametersAttr func(optionalAttr)
47068
47069// RetrieveTPUEmbeddingMomentumParametersTableId sets the optional table_id attribute to value.
47070// If not specified, defaults to -1
47071func RetrieveTPUEmbeddingMomentumParametersTableId(value int64) RetrieveTPUEmbeddingMomentumParametersAttr {
47072	return func(m optionalAttr) {
47073		m["table_id"] = value
47074	}
47075}
47076
47077// RetrieveTPUEmbeddingMomentumParametersTableName sets the optional table_name attribute to value.
47078// If not specified, defaults to ""
47079func RetrieveTPUEmbeddingMomentumParametersTableName(value string) RetrieveTPUEmbeddingMomentumParametersAttr {
47080	return func(m optionalAttr) {
47081		m["table_name"] = value
47082	}
47083}
47084
47085// RetrieveTPUEmbeddingMomentumParametersConfig sets the optional config attribute to value.
47086// If not specified, defaults to ""
47087func RetrieveTPUEmbeddingMomentumParametersConfig(value string) RetrieveTPUEmbeddingMomentumParametersAttr {
47088	return func(m optionalAttr) {
47089		m["config"] = value
47090	}
47091}
47092
47093// Retrieve Momentum embedding parameters.
47094//
47095// An op that retrieves optimization parameters from embedding to host
47096// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
47097// the correct embedding table configuration. For example, this op is
47098// used to retrieve updated parameters before saving a checkpoint.
47099//
47100// Returns:
47101//	parameters: Parameter parameters updated by the Momentum optimization algorithm.
47102//	momenta: Parameter momenta updated by the Momentum optimization algorithm.
47103func RetrieveTPUEmbeddingMomentumParameters(scope *Scope, num_shards int64, shard_id int64, optional ...RetrieveTPUEmbeddingMomentumParametersAttr) (parameters tf.Output, momenta tf.Output) {
47104	if scope.Err() != nil {
47105		return
47106	}
47107	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
47108	for _, a := range optional {
47109		a(attrs)
47110	}
47111	opspec := tf.OpSpec{
47112		Type: "RetrieveTPUEmbeddingMomentumParameters",
47113
47114		Attrs: attrs,
47115	}
47116	op := scope.AddOperation(opspec)
47117	return op.Output(0), op.Output(1)
47118}
47119
47120// LoadTPUEmbeddingMomentumParametersGradAccumDebugAttr is an optional argument to LoadTPUEmbeddingMomentumParametersGradAccumDebug.
47121type LoadTPUEmbeddingMomentumParametersGradAccumDebugAttr func(optionalAttr)
47122
47123// LoadTPUEmbeddingMomentumParametersGradAccumDebugTableId sets the optional table_id attribute to value.
47124// If not specified, defaults to -1
47125func LoadTPUEmbeddingMomentumParametersGradAccumDebugTableId(value int64) LoadTPUEmbeddingMomentumParametersGradAccumDebugAttr {
47126	return func(m optionalAttr) {
47127		m["table_id"] = value
47128	}
47129}
47130
47131// LoadTPUEmbeddingMomentumParametersGradAccumDebugTableName sets the optional table_name attribute to value.
47132// If not specified, defaults to ""
47133func LoadTPUEmbeddingMomentumParametersGradAccumDebugTableName(value string) LoadTPUEmbeddingMomentumParametersGradAccumDebugAttr {
47134	return func(m optionalAttr) {
47135		m["table_name"] = value
47136	}
47137}
47138
47139// LoadTPUEmbeddingMomentumParametersGradAccumDebugConfig sets the optional config attribute to value.
47140// If not specified, defaults to ""
47141func LoadTPUEmbeddingMomentumParametersGradAccumDebugConfig(value string) LoadTPUEmbeddingMomentumParametersGradAccumDebugAttr {
47142	return func(m optionalAttr) {
47143		m["config"] = value
47144	}
47145}
47146
47147// Load Momentum embedding parameters with debug support.
47148//
47149// An op that loads optimization parameters into HBM for embedding. Must be
47150// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
47151// embedding table configuration. For example, this op is used to install
47152// parameters that are loaded from a checkpoint before a training loop is
47153// executed.
47154//
47155// Arguments:
47156//	parameters: Value of parameters used in the Momentum optimization algorithm.
47157//	momenta: Value of momenta used in the Momentum optimization algorithm.
47158//	gradient_accumulators: Value of gradient_accumulators used in the Momentum optimization algorithm.
47159//
47160//
47161//
47162// Returns the created operation.
47163func LoadTPUEmbeddingMomentumParametersGradAccumDebug(scope *Scope, parameters tf.Output, momenta tf.Output, gradient_accumulators tf.Output, num_shards int64, shard_id int64, optional ...LoadTPUEmbeddingMomentumParametersGradAccumDebugAttr) (o *tf.Operation) {
47164	if scope.Err() != nil {
47165		return
47166	}
47167	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
47168	for _, a := range optional {
47169		a(attrs)
47170	}
47171	opspec := tf.OpSpec{
47172		Type: "LoadTPUEmbeddingMomentumParametersGradAccumDebug",
47173		Input: []tf.Input{
47174			parameters, momenta, gradient_accumulators,
47175		},
47176		Attrs: attrs,
47177	}
47178	return scope.AddOperation(opspec)
47179}
47180
47181// SerializeSparseAttr is an optional argument to SerializeSparse.
47182type SerializeSparseAttr func(optionalAttr)
47183
47184// SerializeSparseOutType sets the optional out_type attribute to value.
47185//
47186// value: The `dtype` to use for serialization; the supported types are `string`
47187// (default) and `variant`.
47188// If not specified, defaults to DT_STRING
47189func SerializeSparseOutType(value tf.DataType) SerializeSparseAttr {
47190	return func(m optionalAttr) {
47191		m["out_type"] = value
47192	}
47193}
47194
47195// Serialize a `SparseTensor` into a `[3]` `Tensor` object.
47196//
47197// Arguments:
47198//	sparse_indices: 2-D.  The `indices` of the `SparseTensor`.
47199//	sparse_values: 1-D.  The `values` of the `SparseTensor`.
47200//	sparse_shape: 1-D.  The `shape` of the `SparseTensor`.
47201func SerializeSparse(scope *Scope, sparse_indices tf.Output, sparse_values tf.Output, sparse_shape tf.Output, optional ...SerializeSparseAttr) (serialized_sparse tf.Output) {
47202	if scope.Err() != nil {
47203		return
47204	}
47205	attrs := map[string]interface{}{}
47206	for _, a := range optional {
47207		a(attrs)
47208	}
47209	opspec := tf.OpSpec{
47210		Type: "SerializeSparse",
47211		Input: []tf.Input{
47212			sparse_indices, sparse_values, sparse_shape,
47213		},
47214		Attrs: attrs,
47215	}
47216	op := scope.AddOperation(opspec)
47217	return op.Output(0)
47218}
47219
47220// DataFormatDimMapAttr is an optional argument to DataFormatDimMap.
47221type DataFormatDimMapAttr func(optionalAttr)
47222
47223// DataFormatDimMapSrcFormat sets the optional src_format attribute to value.
47224//
47225// value: source data format.
47226// If not specified, defaults to "NHWC"
47227func DataFormatDimMapSrcFormat(value string) DataFormatDimMapAttr {
47228	return func(m optionalAttr) {
47229		m["src_format"] = value
47230	}
47231}
47232
47233// DataFormatDimMapDstFormat sets the optional dst_format attribute to value.
47234//
47235// value: destination data format.
47236// If not specified, defaults to "NCHW"
47237func DataFormatDimMapDstFormat(value string) DataFormatDimMapAttr {
47238	return func(m optionalAttr) {
47239		m["dst_format"] = value
47240	}
47241}
47242
47243// Returns the dimension index in the destination data format given the one in
47244// the source data format.
47246//
47247// Arguments:
47248//	x: A Tensor with each element as a dimension index in source data format.
47249// Must be in the range [-4, 4).
47250//
47251// Returns A Tensor with each element as a dimension index in destination data format.
47252func DataFormatDimMap(scope *Scope, x tf.Output, optional ...DataFormatDimMapAttr) (y tf.Output) {
47253	if scope.Err() != nil {
47254		return
47255	}
47256	attrs := map[string]interface{}{}
47257	for _, a := range optional {
47258		a(attrs)
47259	}
47260	opspec := tf.OpSpec{
47261		Type: "DataFormatDimMap",
47262		Input: []tf.Input{
47263			x,
47264		},
47265		Attrs: attrs,
47266	}
47267	op := scope.AddOperation(opspec)
47268	return op.Output(0)
47269}
47270
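// The mapping itself is just a character lookup; a plain-Go sketch with the
// default formats (illustrative, not part of the generated API):
//
// ```
// package main
//
// import (
// 	"fmt"
// 	"strings"
// )
//
// // dimMap returns the position in dst of the dimension at position x in src.
// // Negative x counts from the end, matching the op's [-4, 4) input range.
// func dimMap(x int, src, dst string) int {
// 	if x < 0 {
// 		x += len(src)
// 	}
// 	return strings.IndexByte(dst, src[x])
// }
//
// func main() {
// 	for _, x := range []int{0, 1, 2, 3, -1} {
// 		fmt.Println(x, "->", dimMap(x, "NHWC", "NCHW")) // 0->0 1->2 2->3 3->1 -1->1
// 	}
// }
// ```
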
47271// Assigns a new value to a variable.
47272//
47273// Any ReadVariableOp with a control dependency on this op is guaranteed to return
47274// this value or a subsequent newer value of the variable.
47275//
47276// Arguments:
47277//	resource: handle to the resource in which to store the variable.
47278//	value: the value to which the variable will be set.
47279//
47280// Returns the created operation.
47281func AssignVariableOp(scope *Scope, resource tf.Output, value tf.Output) (o *tf.Operation) {
47282	if scope.Err() != nil {
47283		return
47284	}
47285	opspec := tf.OpSpec{
47286		Type: "AssignVariableOp",
47287		Input: []tf.Input{
47288			resource, value,
47289		},
47290	}
47291	return scope.AddOperation(opspec)
47292}
47293
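// A minimal sketch of writing and then reading back a resource variable
// (assumes VarHandleOp and ReadVariableOp from this package; error handling
// trimmed to a helper):
//
// ```
// package main
//
// import (
// 	"fmt"
//
// 	tf "github.com/tensorflow/tensorflow/tensorflow/go"
// 	"github.com/tensorflow/tensorflow/tensorflow/go/op"
// )
//
// func must(err error) {
// 	if err != nil {
// 		panic(err)
// 	}
// }
//
// func main() {
// 	s := op.NewScope()
// 	handle := op.VarHandleOp(s, tf.Float, tf.ScalarShape())
// 	assign := op.AssignVariableOp(s, handle, op.Const(s, float32(3)))
// 	read := op.ReadVariableOp(s, handle, tf.Float)
// 	graph, err := s.Finalize()
// 	must(err)
// 	sess, err := tf.NewSession(graph, nil)
// 	must(err)
// 	// Run the assignment first, then fetch the read in a second step.
// 	_, err = sess.Run(nil, nil, []*tf.Operation{assign})
// 	must(err)
// 	out, err := sess.Run(nil, []tf.Output{read}, nil)
// 	must(err)
// 	fmt.Println(out[0].Value()) // 3
// }
// ```
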
47294// An op which supports a basic einsum operation with 2 inputs and 1 output.
47295//
47296// This op has better TPU performance since it doesn't have the explicit reshape
47297// and transpose operations that tf.einsum does.
47298func XlaEinsum(scope *Scope, a tf.Output, b tf.Output, equation string) (product tf.Output) {
47299	if scope.Err() != nil {
47300		return
47301	}
47302	attrs := map[string]interface{}{"equation": equation}
47303	opspec := tf.OpSpec{
47304		Type: "XlaEinsum",
47305		Input: []tf.Input{
47306			a, b,
47307		},
47308		Attrs: attrs,
47309	}
47310	op := scope.AddOperation(opspec)
47311	return op.Output(0)
47312}
47313
47314// Extracts the average gradient in the given ConditionalAccumulator.
47315//
47316// The op blocks until sufficient (i.e., more than num_required)
47317// gradients have been accumulated.  If the accumulator has already
47318// aggregated more than num_required gradients, it returns the average of
47319// the accumulated gradients.  Also automatically increments the recorded
47320// global_step in the accumulator by 1, and resets the aggregate to 0.
47321//
47322// Arguments:
47323//	handle: The handle to an accumulator.
47324//	num_required: Number of gradients required before we return an aggregate.
47325//	dtype: The data type of accumulated gradients. Needs to correspond to the type
47326// of the accumulator.
47327//
47328// Returns The average of the accumulated gradients.
47329func ResourceAccumulatorTakeGradient(scope *Scope, handle tf.Output, num_required tf.Output, dtype tf.DataType) (average tf.Output) {
47330	if scope.Err() != nil {
47331		return
47332	}
47333	attrs := map[string]interface{}{"dtype": dtype}
47334	opspec := tf.OpSpec{
47335		Type: "ResourceAccumulatorTakeGradient",
47336		Input: []tf.Input{
47337			handle, num_required,
47338		},
47339		Attrs: attrs,
47340	}
47341	op := scope.AddOperation(opspec)
47342	return op.Output(0)
47343}
47344
47345// InfeedEnqueueAttr is an optional argument to InfeedEnqueue.
47346type InfeedEnqueueAttr func(optionalAttr)
47347
47348// InfeedEnqueueShape sets the optional shape attribute to value.
47349//
47350// value: The shape of the tensor.
47351// If not specified, defaults to <>
47352func InfeedEnqueueShape(value tf.Shape) InfeedEnqueueAttr {
47353	return func(m optionalAttr) {
47354		m["shape"] = value
47355	}
47356}
47357
47358// InfeedEnqueueLayout sets the optional layout attribute to value.
47359//
47360// value: A vector holding the requested layout in minor-to-major sequence.
47361// If a layout attribute is passed, but its values are all -1, the layout will
47362// be computed by the infeed operation.
47363// If not specified, defaults to <>
47364func InfeedEnqueueLayout(value []int64) InfeedEnqueueAttr {
47365	return func(m optionalAttr) {
47366		m["layout"] = value
47367	}
47368}
47369
47370// InfeedEnqueueDeviceOrdinal sets the optional device_ordinal attribute to value.
47371//
47372// value: The TPU device to use. This should be -1 when the Op
47373// is running on a TPU device, and >= 0 when the Op is running on the CPU
47374// device.
47375// If not specified, defaults to -1
47376func InfeedEnqueueDeviceOrdinal(value int64) InfeedEnqueueAttr {
47377	return func(m optionalAttr) {
47378		m["device_ordinal"] = value
47379	}
47380}
47381
47382// An op which feeds a single Tensor value into the computation.
47383//
47384// Arguments:
47385//	input: A tensor that will be provided using the infeed mechanism.
47386//
47387// Returns the created operation.
47388func InfeedEnqueue(scope *Scope, input tf.Output, optional ...InfeedEnqueueAttr) (o *tf.Operation) {
47389	if scope.Err() != nil {
47390		return
47391	}
47392	attrs := map[string]interface{}{}
47393	for _, a := range optional {
47394		a(attrs)
47395	}
47396	opspec := tf.OpSpec{
47397		Type: "InfeedEnqueue",
47398		Input: []tf.Input{
47399			input,
47400		},
47401		Attrs: attrs,
47402	}
47403	return scope.AddOperation(opspec)
47404}
47405
47406// Outputs deterministic pseudorandom random integers from a uniform distribution.
47407//
47408// The generated values follow a uniform distribution in the range `[minval, maxval)`.
47409//
47410// The outputs are a deterministic function of `shape`, `key`, `counter`, `alg`, `minval` and `maxval`.
47411//
47412// Arguments:
47413//	shape: The shape of the output tensor.
47414//	key: Key for the counter-based RNG algorithm (shape uint64[1]).
47415//	counter: Initial counter for the counter-based RNG algorithm (shape uint64[2] or uint64[1] depending on the algorithm). If a larger vector is given, only the needed portion on the left (i.e. [:N]) will be used.
47416//	alg: The RNG algorithm (shape int32[]).
47417//	minval: Minimum value (inclusive, scalar).
47418//	maxval: Maximum value (exclusive, scalar).
47419//
47420// Returns Random values with specified shape.
47421func StatelessRandomUniformIntV2(scope *Scope, shape tf.Output, key tf.Output, counter tf.Output, alg tf.Output, minval tf.Output, maxval tf.Output) (output tf.Output) {
47422	if scope.Err() != nil {
47423		return
47424	}
47425	opspec := tf.OpSpec{
47426		Type: "StatelessRandomUniformIntV2",
47427		Input: []tf.Input{
47428			shape, key, counter, alg, minval, maxval,
47429		},
47430	}
47431	op := scope.AddOperation(opspec)
47432	return op.Output(0)
47433}
47434
47435// EnqueueTPUEmbeddingIntegerBatchAttr is an optional argument to EnqueueTPUEmbeddingIntegerBatch.
47436type EnqueueTPUEmbeddingIntegerBatchAttr func(optionalAttr)
47437
47438// EnqueueTPUEmbeddingIntegerBatchDeviceOrdinal sets the optional device_ordinal attribute to value.
47439//
47440// value: The TPU device to use. Should be >= 0 and less than the number
47441// of TPU cores in the task on which the node is placed.
47442// If not specified, defaults to -1
47443func EnqueueTPUEmbeddingIntegerBatchDeviceOrdinal(value int64) EnqueueTPUEmbeddingIntegerBatchAttr {
47444	return func(m optionalAttr) {
47445		m["device_ordinal"] = value
47446	}
47447}
47448
47449// An op that enqueues a list of input batch tensors to TPUEmbedding.
47450//
47451// Arguments:
47452//	batch: A list of 1D tensors, one for each embedding table, containing the
47453// indices into the tables.
47454//	mode_override: A string input that overrides the mode specified in the
47455// TPUEmbeddingConfiguration. Supported values are {'unspecified', 'inference',
47456// 'training', 'backward_pass_only'}. When set to 'unspecified', the mode set
47457// in TPUEmbeddingConfiguration is used, otherwise mode_override is used.
47458//
47459// Returns the created operation.
47460func EnqueueTPUEmbeddingIntegerBatch(scope *Scope, batch []tf.Output, mode_override tf.Output, optional ...EnqueueTPUEmbeddingIntegerBatchAttr) (o *tf.Operation) {
47461	if scope.Err() != nil {
47462		return
47463	}
47464	attrs := map[string]interface{}{}
47465	for _, a := range optional {
47466		a(attrs)
47467	}
47468	opspec := tf.OpSpec{
47469		Type: "EnqueueTPUEmbeddingIntegerBatch",
47470		Input: []tf.Input{
47471			tf.OutputList(batch), mode_override,
47472		},
47473		Attrs: attrs,
47474	}
47475	return scope.AddOperation(opspec)
47476}
47477
47478// Component-wise divides a SparseTensor by a dense Tensor.
47479//
47480// *Limitation*: this Op only broadcasts the dense side to the sparse side, but not
47481// the other direction.
47482//
47483// Arguments:
47484//	sp_indices: 2-D.  `N x R` matrix with the indices of non-empty values in a
47485// SparseTensor, possibly not in canonical ordering.
47486//	sp_values: 1-D.  `N` non-empty values corresponding to `sp_indices`.
47487//	sp_shape: 1-D.  Shape of the input SparseTensor.
47488//	dense: `R`-D.  The dense Tensor operand.
47489//
47490// Returns 1-D.  The `N` values that are operated on.
47491func SparseDenseCwiseDiv(scope *Scope, sp_indices tf.Output, sp_values tf.Output, sp_shape tf.Output, dense tf.Output) (output tf.Output) {
47492	if scope.Err() != nil {
47493		return
47494	}
47495	opspec := tf.OpSpec{
47496		Type: "SparseDenseCwiseDiv",
47497		Input: []tf.Input{
47498			sp_indices, sp_values, sp_shape, dense,
47499		},
47500	}
47501	op := scope.AddOperation(opspec)
47502	return op.Output(0)
47503}
47504
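// A plain-Go sketch of the 2-D case (illustrative only; the `%` trick
// emulates broadcasting of size-1 dense dimensions to the sparse side, which
// is the only direction this op broadcasts):
//
// ```
// package main
//
// import "fmt"
//
// func sparseDenseDiv(indices [][2]int, values []float64, dense [][]float64) []float64 {
// 	out := make([]float64, len(values))
// 	for i, ix := range indices {
// 		out[i] = values[i] / dense[ix[0]%len(dense)][ix[1]%len(dense[0])]
// 	}
// 	return out
// }
//
// func main() {
// 	indices := [][2]int{{0, 0}, {1, 2}}
// 	values := []float64{6, 9}
// 	dense := [][]float64{{2, 2, 3}} // one row, broadcast down the rows
// 	fmt.Println(sparseDenseDiv(indices, values, dense)) // [3 3]
// }
// ```
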
47505// Returns the gradient of `Tile`.
47506//
47507// DEPRECATED at GraphDef version 3: TileGrad has been replaced with reduce_sum
47508//
47509// Since `Tile` takes an input and repeats the input `multiples` times
47510// along each dimension, `TileGrad` takes in `multiples` and aggregates
47511// each repeated tile of `input` into `output`.
47512func TileGrad(scope *Scope, input tf.Output, multiples tf.Output) (output tf.Output) {
47513	if scope.Err() != nil {
47514		return
47515	}
47516	opspec := tf.OpSpec{
47517		Type: "TileGrad",
47518		Input: []tf.Input{
47519			input, multiples,
47520		},
47521	}
47522	op := scope.AddOperation(opspec)
47523	return op.Output(0)
47524}
47525
47526// AudioSummaryAttr is an optional argument to AudioSummary.
47527type AudioSummaryAttr func(optionalAttr)
47528
47529// AudioSummaryMaxOutputs sets the optional max_outputs attribute to value.
47530//
47531// value: Max number of batch elements to generate audio for.
47532// If not specified, defaults to 3
47533//
47534// REQUIRES: value >= 1
47535func AudioSummaryMaxOutputs(value int64) AudioSummaryAttr {
47536	return func(m optionalAttr) {
47537		m["max_outputs"] = value
47538	}
47539}
47540
47541// Outputs a `Summary` protocol buffer with audio.
47542//
47543// DEPRECATED at GraphDef version 15: Use AudioSummaryV2.
47544//
47545// The summary has up to `max_outputs` summary values containing audio. The
47546// audio is built from `tensor` which must be 3-D with shape `[batch_size,
47547// frames, channels]` or 2-D with shape `[batch_size, frames]`. The values are
47548// assumed to be in the range of `[-1.0, 1.0]` with a sample rate of `sample_rate`.
47549//
47550// The `tag` argument is a scalar `Tensor` of type `string`.  It is used to
47551// build the `tag` of the summary values:
47552//
47553// *  If `max_outputs` is 1, the summary value tag is '*tag*/audio'.
47554// *  If `max_outputs` is greater than 1, the summary value tags are
47555//    generated sequentially as '*tag*/audio/0', '*tag*/audio/1', etc.
47556//
47557// Arguments:
47558//	tag: Scalar. Used to build the `tag` attribute of the summary values.
47559//	tensor: 2-D of shape `[batch_size, frames]`.
47560//	sample_rate: The sample rate of the signal in hertz.
47561//
47562// Returns Scalar. Serialized `Summary` protocol buffer.
47563func AudioSummary(scope *Scope, tag tf.Output, tensor tf.Output, sample_rate float32, optional ...AudioSummaryAttr) (summary tf.Output) {
47564	if scope.Err() != nil {
47565		return
47566	}
47567	attrs := map[string]interface{}{"sample_rate": sample_rate}
47568	for _, a := range optional {
47569		a(attrs)
47570	}
47571	opspec := tf.OpSpec{
47572		Type: "AudioSummary",
47573		Input: []tf.Input{
47574			tag, tensor,
47575		},
47576		Attrs: attrs,
47577	}
47578	op := scope.AddOperation(opspec)
47579	return op.Output(0)
47580}
47581
47582// LoadTPUEmbeddingFTRLParametersAttr is an optional argument to LoadTPUEmbeddingFTRLParameters.
47583type LoadTPUEmbeddingFTRLParametersAttr func(optionalAttr)
47584
47585// LoadTPUEmbeddingFTRLParametersTableId sets the optional table_id attribute to value.
47586// If not specified, defaults to -1
47587func LoadTPUEmbeddingFTRLParametersTableId(value int64) LoadTPUEmbeddingFTRLParametersAttr {
47588	return func(m optionalAttr) {
47589		m["table_id"] = value
47590	}
47591}
47592
47593// LoadTPUEmbeddingFTRLParametersTableName sets the optional table_name attribute to value.
47594// If not specified, defaults to ""
47595func LoadTPUEmbeddingFTRLParametersTableName(value string) LoadTPUEmbeddingFTRLParametersAttr {
47596	return func(m optionalAttr) {
47597		m["table_name"] = value
47598	}
47599}
47600
47601// LoadTPUEmbeddingFTRLParametersConfig sets the optional config attribute to value.
47602// If not specified, defaults to ""
47603func LoadTPUEmbeddingFTRLParametersConfig(value string) LoadTPUEmbeddingFTRLParametersAttr {
47604	return func(m optionalAttr) {
47605		m["config"] = value
47606	}
47607}
47608
47609// Load FTRL embedding parameters.
47610//
47611// An op that loads optimization parameters into HBM for embedding. Must be
47612// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
47613// embedding table configuration. For example, this op is used to install
47614// parameters that are loaded from a checkpoint before a training loop is
47615// executed.
47616//
47617// Arguments:
47618//	parameters: Value of parameters used in the FTRL optimization algorithm.
47619//	accumulators: Value of accumulators used in the FTRL optimization algorithm.
47620//	linears: Value of linears used in the FTRL optimization algorithm.
47621//
47622//
47623//
47624// Returns the created operation.
47625func LoadTPUEmbeddingFTRLParameters(scope *Scope, parameters tf.Output, accumulators tf.Output, linears tf.Output, num_shards int64, shard_id int64, optional ...LoadTPUEmbeddingFTRLParametersAttr) (o *tf.Operation) {
47626	if scope.Err() != nil {
47627		return
47628	}
47629	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
47630	for _, a := range optional {
47631		a(attrs)
47632	}
47633	opspec := tf.OpSpec{
47634		Type: "LoadTPUEmbeddingFTRLParameters",
47635		Input: []tf.Input{
47636			parameters, accumulators, linears,
47637		},
47638		Attrs: attrs,
47639	}
47640	return scope.AddOperation(opspec)
47641}
47642
47643// Returns an element-wise indication of the sign of a number.
47644//
47645// `y = sign(x) = -1` if `x < 0`; 0 if `x == 0`; 1 if `x > 0`.
47646//
47647// For complex numbers, `y = sign(x) = x / |x|` if `x != 0`, otherwise `y = 0`.
47648//
47649// Example usage:
47650// >>> tf.math.sign([0., 2., -3.])
47651// <tf.Tensor: shape=(3,), dtype=float32, numpy=array([ 0.,  1., -1.], dtype=float32)>
47652func Sign(scope *Scope, x tf.Output) (y tf.Output) {
47653	if scope.Err() != nil {
47654		return
47655	}
47656	opspec := tf.OpSpec{
47657		Type: "Sign",
47658		Input: []tf.Input{
47659			x,
47660		},
47661	}
47662	op := scope.AddOperation(opspec)
47663	return op.Output(0)
47664}
47665
47666// ResourceApplyAddSignAttr is an optional argument to ResourceApplyAddSign.
47667type ResourceApplyAddSignAttr func(optionalAttr)
47668
47669// ResourceApplyAddSignUseLocking sets the optional use_locking attribute to value.
47670//
47671// value: If `True`, updating of the var and m tensors is
47672// protected by a lock; otherwise the behavior is undefined, but may exhibit less
47673// contention.
47674// If not specified, defaults to false
47675func ResourceApplyAddSignUseLocking(value bool) ResourceApplyAddSignAttr {
47676	return func(m optionalAttr) {
47677		m["use_locking"] = value
47678	}
47679}
47680
47681// Update '*var' according to the AddSign update.
47682//
47683// m_t <- beta1 * m_{t-1} + (1 - beta1) * g
47684// update <- (alpha + sign_decay * sign(g) *sign(m)) * g
47685// variable <- variable - lr_t * update
47686//
47687// Arguments:
47688//	var_: Should be from a Variable().
47689//	m: Should be from a Variable().
47690//	lr: Scaling factor. Must be a scalar.
47691//	alpha: Must be a scalar.
47692//	sign_decay: Must be a scalar.
47693//	beta: Must be a scalar.
47694//	grad: The gradient.
47695//
47696// Returns the created operation.
47697func ResourceApplyAddSign(scope *Scope, var_ tf.Output, m tf.Output, lr tf.Output, alpha tf.Output, sign_decay tf.Output, beta tf.Output, grad tf.Output, optional ...ResourceApplyAddSignAttr) (o *tf.Operation) {
47698	if scope.Err() != nil {
47699		return
47700	}
47701	attrs := map[string]interface{}{}
47702	for _, a := range optional {
47703		a(attrs)
47704	}
47705	opspec := tf.OpSpec{
47706		Type: "ResourceApplyAddSign",
47707		Input: []tf.Input{
47708			var_, m, lr, alpha, sign_decay, beta, grad,
47709		},
47710		Attrs: attrs,
47711	}
47712	return scope.AddOperation(opspec)
47713}
47714
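// The same update in plain Go for a scalar parameter (illustrative values):
//
// ```
// package main
//
// import "fmt"
//
// func sign(x float64) float64 {
// 	switch {
// 	case x > 0:
// 		return 1
// 	case x < 0:
// 		return -1
// 	}
// 	return 0
// }
//
// func addSignStep(v, m, lr, alpha, signDecay, beta, grad float64) (float64, float64) {
// 	m = beta*m + (1-beta)*grad
// 	update := (alpha + signDecay*sign(grad)*sign(m)) * grad
// 	return v - lr*update, m
// }
//
// func main() {
// 	v, m := addSignStep(1.0, 0, 0.1, 1.0, 1.0, 0.9, 0.5)
// 	fmt.Println(v, m) // grad and m agree in sign, so the step scales by alpha+1
// }
// ```
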
47715// Computes inverse hyperbolic tangent of x element-wise.
47716//
47717//   Given an input tensor, this function computes inverse hyperbolic tangent
47718//   for every element in the tensor. Input range is `[-1,1]` and output range is
47719//   `[-inf, inf]`. If input is `-1`, output will be `-inf` and if the
47720//   input is `1`, output will be `inf`. Values outside the range will have
47721//   `nan` as output.
47722//
47723//   ```python
47724//   x = tf.constant([-float("inf"), -1, -0.5, 1, 0, 0.5, 10, float("inf")])
47725//   tf.math.atanh(x) ==> [nan -inf -0.54930615 inf  0. 0.54930615 nan nan]
47726//   ```
47727func Atanh(scope *Scope, x tf.Output) (y tf.Output) {
47728	if scope.Err() != nil {
47729		return
47730	}
47731	opspec := tf.OpSpec{
47732		Type: "Atanh",
47733		Input: []tf.Input{
47734			x,
47735		},
47736	}
47737	op := scope.AddOperation(opspec)
47738	return op.Output(0)
47739}
47740
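// Go's standard library computes the same function for scalars, which makes
// the documented special cases easy to cross-check (illustrative only):
//
// ```
// package main
//
// import (
// 	"fmt"
// 	"math"
// )
//
// func main() {
// 	for _, x := range []float64{-1, -0.5, 0, 0.5, 1, 10} {
// 		fmt.Println(x, math.Atanh(x)) // -Inf, -0.549..., 0, 0.549..., +Inf, NaN
// 	}
// }
// ```
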
47741// RetrieveTPUEmbeddingFTRLParametersAttr is an optional argument to RetrieveTPUEmbeddingFTRLParameters.
47742type RetrieveTPUEmbeddingFTRLParametersAttr func(optionalAttr)
47743
47744// RetrieveTPUEmbeddingFTRLParametersTableId sets the optional table_id attribute to value.
47745// If not specified, defaults to -1
47746func RetrieveTPUEmbeddingFTRLParametersTableId(value int64) RetrieveTPUEmbeddingFTRLParametersAttr {
47747	return func(m optionalAttr) {
47748		m["table_id"] = value
47749	}
47750}
47751
47752// RetrieveTPUEmbeddingFTRLParametersTableName sets the optional table_name attribute to value.
47753// If not specified, defaults to ""
47754func RetrieveTPUEmbeddingFTRLParametersTableName(value string) RetrieveTPUEmbeddingFTRLParametersAttr {
47755	return func(m optionalAttr) {
47756		m["table_name"] = value
47757	}
47758}
47759
47760// RetrieveTPUEmbeddingFTRLParametersConfig sets the optional config attribute to value.
47761// If not specified, defaults to ""
47762func RetrieveTPUEmbeddingFTRLParametersConfig(value string) RetrieveTPUEmbeddingFTRLParametersAttr {
47763	return func(m optionalAttr) {
47764		m["config"] = value
47765	}
47766}
47767
47768// Retrieve FTRL embedding parameters.
47769//
47770// An op that retrieves optimization parameters from embedding to host
47771// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
47772// the correct embedding table configuration. For example, this op is
47773// used to retrieve updated parameters before saving a checkpoint.
47774//
47775// Returns:
47776//	parameters: Parameter parameters updated by the FTRL optimization algorithm.
47777//	accumulators: Parameter accumulators updated by the FTRL optimization algorithm.
47778//	linears: Parameter linears updated by the FTRL optimization algorithm.
47779func RetrieveTPUEmbeddingFTRLParameters(scope *Scope, num_shards int64, shard_id int64, optional ...RetrieveTPUEmbeddingFTRLParametersAttr) (parameters tf.Output, accumulators tf.Output, linears tf.Output) {
47780	if scope.Err() != nil {
47781		return
47782	}
47783	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
47784	for _, a := range optional {
47785		a(attrs)
47786	}
47787	opspec := tf.OpSpec{
47788		Type: "RetrieveTPUEmbeddingFTRLParameters",
47789
47790		Attrs: attrs,
47791	}
47792	op := scope.AddOperation(opspec)
47793	return op.Output(0), op.Output(1), op.Output(2)
47794}
47795
47796// Strip leading and trailing whitespaces from the Tensor.
47797//
47798// Arguments:
47799//	input: A string `Tensor` of any shape.
47800//
47801// Returns A string `Tensor` of the same shape as the input.
47802//
47803// Examples:
47804//
47805// >>> tf.strings.strip(["\nTensorFlow", "     The python library    "]).numpy()
47806// array([b'TensorFlow', b'The python library'], dtype=object)
47807func StringStrip(scope *Scope, input tf.Output) (output tf.Output) {
47808	if scope.Err() != nil {
47809		return
47810	}
47811	opspec := tf.OpSpec{
47812		Type: "StringStrip",
47813		Input: []tf.Input{
47814			input,
47815		},
47816	}
47817	op := scope.AddOperation(opspec)
47818	return op.Output(0)
47819}
47820
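// A minimal client-side sketch mirroring the example above (assumes the tf
// and op packages from this module; error handling trimmed):
//
// ```
// package main
//
// import (
// 	"fmt"
//
// 	tf "github.com/tensorflow/tensorflow/tensorflow/go"
// 	"github.com/tensorflow/tensorflow/tensorflow/go/op"
// )
//
// func main() {
// 	s := op.NewScope()
// 	in := op.Const(s, []string{"\nTensorFlow", "     The python library    "})
// 	stripped := op.StringStrip(s, in)
// 	graph, err := s.Finalize()
// 	if err != nil {
// 		panic(err)
// 	}
// 	sess, err := tf.NewSession(graph, nil)
// 	if err != nil {
// 		panic(err)
// 	}
// 	out, err := sess.Run(nil, []tf.Output{stripped}, nil)
// 	if err != nil {
// 		panic(err)
// 	}
// 	fmt.Println(out[0].Value()) // [TensorFlow The python library]
// }
// ```
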
47821// Returns the value stored in an Optional variant or raises an error if none exists.
47822func OptionalGetValue(scope *Scope, optional tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (components []tf.Output) {
47823	if scope.Err() != nil {
47824		return
47825	}
47826	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
47827	opspec := tf.OpSpec{
47828		Type: "OptionalGetValue",
47829		Input: []tf.Input{
47830			optional,
47831		},
47832		Attrs: attrs,
47833	}
47834	op := scope.AddOperation(opspec)
47835	if scope.Err() != nil {
47836		return
47837	}
47838	var idx int
47839	var err error
47840	if components, idx, err = makeOutputList(op, idx, "components"); err != nil {
47841		scope.UpdateErr("OptionalGetValue", err)
47842		return
47843	}
47844	return components
47845}
47846
47847// Determine the script codes of a given tensor of Unicode integer code points.
47848//
47849// This operation converts Unicode code points to script codes corresponding to
47850// each code point. Script codes correspond to International Components for
47851// Unicode (ICU) UScriptCode values.
47852//
47853// See
47854// [ICU project docs](http://icu-project.org/apiref/icu4c/uscript_8h.html)
47855// for more details on script codes.
47856//
47857// For an example, see the unicode strings guide on [unicode scripts]
47858// (https://www.tensorflow.org/tutorials/load_data/unicode#representing_unicode).
47859//
47860// Returns -1 (USCRIPT_INVALID_CODE) for invalid codepoints. Output shape will
47861// match input shape.
47862//
47863// Examples:
47864//
47865// >>> tf.strings.unicode_script([1, 31, 38])
47866// <tf.Tensor: shape=(3,), dtype=int32, numpy=array([0, 0, 0], dtype=int32)>
47867//
47868// Arguments:
47869//	input: A Tensor of int32 Unicode code points.
47870//
47871// Returns A Tensor of int32 script codes corresponding to each input code point.
47872func UnicodeScript(scope *Scope, input tf.Output) (output tf.Output) {
47873	if scope.Err() != nil {
47874		return
47875	}
47876	opspec := tf.OpSpec{
47877		Type: "UnicodeScript",
47878		Input: []tf.Input{
47879			input,
47880		},
47881	}
47882	op := scope.AddOperation(opspec)
47883	return op.Output(0)
47884}
47885
47886// Computes gradients for the exponential linear (Elu) operation.
47887//
47888// Arguments:
47889//	gradients: The backpropagated gradients to the corresponding Elu operation.
47890//	outputs: The outputs of the corresponding Elu operation.
47891//
47892// Returns The gradients: `gradients * (outputs + 1)` if outputs < 0,
47893// `gradients` otherwise.
47894func EluGrad(scope *Scope, gradients tf.Output, outputs tf.Output) (backprops tf.Output) {
47895	if scope.Err() != nil {
47896		return
47897	}
47898	opspec := tf.OpSpec{
47899		Type: "EluGrad",
47900		Input: []tf.Input{
47901			gradients, outputs,
47902		},
47903	}
47904	op := scope.AddOperation(opspec)
47905	return op.Output(0)
47906}
47907
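// The rule above in plain Go (illustrative; outputs < 0 means the Elu was in
// its exponential regime, where d/dx (e^x - 1) = output + 1):
//
// ```
// package main
//
// import "fmt"
//
// func eluGrad(gradients, outputs []float64) []float64 {
// 	backprops := make([]float64, len(gradients))
// 	for i := range gradients {
// 		if outputs[i] < 0 {
// 			backprops[i] = gradients[i] * (outputs[i] + 1)
// 		} else {
// 			backprops[i] = gradients[i]
// 		}
// 	}
// 	return backprops
// }
//
// func main() {
// 	fmt.Println(eluGrad([]float64{1, 1}, []float64{-0.5, 2})) // [0.5 1]
// }
// ```
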
47908// ParameterizedTruncatedNormalAttr is an optional argument to ParameterizedTruncatedNormal.
47909type ParameterizedTruncatedNormalAttr func(optionalAttr)
47910
47911// ParameterizedTruncatedNormalSeed sets the optional seed attribute to value.
47912//
47913// value: If either `seed` or `seed2` are set to be non-zero, the random number
47914// generator is seeded by the given seed.  Otherwise, it is seeded by a
47915// random seed.
47916// If not specified, defaults to 0
47917func ParameterizedTruncatedNormalSeed(value int64) ParameterizedTruncatedNormalAttr {
47918	return func(m optionalAttr) {
47919		m["seed"] = value
47920	}
47921}
47922
47923// ParameterizedTruncatedNormalSeed2 sets the optional seed2 attribute to value.
47924//
47925// value: A second seed to avoid seed collision.
47926// If not specified, defaults to 0
47927func ParameterizedTruncatedNormalSeed2(value int64) ParameterizedTruncatedNormalAttr {
47928	return func(m optionalAttr) {
47929		m["seed2"] = value
47930	}
47931}
47932
47933// Outputs random values from a normal distribution.
47934//
47935// The parameters may each be a scalar which applies to the entire output, or a
47936// vector of length shape[0] which stores the parameters for each batch.
47937//
47938// Arguments:
47939//	shape: The shape of the output tensor. Batches are indexed by the 0th dimension.
47940//	means: The mean parameter of each batch.
47941//	stdevs: The standard deviation parameter of each batch. Must be greater than 0.
47942//	minvals: The minimum cutoff. May be -infinity.
47943//	maxvals: The maximum cutoff. May be +infinity, and must be more than the minval
47944// for each batch.
47945//
47946// Returns A matrix of shape num_batches x samples_per_batch, filled with random
47947// truncated normal values using the parameters for each row.
47948func ParameterizedTruncatedNormal(scope *Scope, shape tf.Output, means tf.Output, stdevs tf.Output, minvals tf.Output, maxvals tf.Output, optional ...ParameterizedTruncatedNormalAttr) (output tf.Output) {
47949	if scope.Err() != nil {
47950		return
47951	}
47952	attrs := map[string]interface{}{}
47953	for _, a := range optional {
47954		a(attrs)
47955	}
47956	opspec := tf.OpSpec{
47957		Type: "ParameterizedTruncatedNormal",
47958		Input: []tf.Input{
47959			shape, means, stdevs, minvals, maxvals,
47960		},
47961		Attrs: attrs,
47962	}
47963	op := scope.AddOperation(opspec)
47964	return op.Output(0)
47965}
47966
47967// Retrieves the tree ensemble resource stamp token, number of trees and growing statistics.
47968//
47969// Arguments:
47970//	tree_ensemble_handle: Handle to the tree ensemble.
47971//
47972// Returns:
47973//	stamp_token: Stamp token of the tree ensemble resource.
47974//	num_trees: The number of trees in the tree ensemble resource.
47975//	num_finalized_trees: The number of trees that were finished successfully.
47976//	num_attempted_layers: The number of layers we attempted to build (but not necessarily succeeded).
47977//	last_layer_nodes_range: Rank-1, size-2 tensor that contains the start and end ids of the nodes in the latest
47978// layer.
47979func BoostedTreesGetEnsembleStates(scope *Scope, tree_ensemble_handle tf.Output) (stamp_token tf.Output, num_trees tf.Output, num_finalized_trees tf.Output, num_attempted_layers tf.Output, last_layer_nodes_range tf.Output) {
47980	if scope.Err() != nil {
47981		return
47982	}
47983	opspec := tf.OpSpec{
47984		Type: "BoostedTreesGetEnsembleStates",
47985		Input: []tf.Input{
47986			tree_ensemble_handle,
47987		},
47988	}
47989	op := scope.AddOperation(opspec)
47990	return op.Output(0), op.Output(1), op.Output(2), op.Output(3), op.Output(4)
47991}
47992
47993// ResourceScatterNdAddAttr is an optional argument to ResourceScatterNdAdd.
47994type ResourceScatterNdAddAttr func(optionalAttr)
47995
47996// ResourceScatterNdAddUseLocking sets the optional use_locking attribute to value.
47997//
47998// value: An optional bool. Defaults to True. If True, the assignment will
47999// be protected by a lock; otherwise the behavior is undefined,
48000// but may exhibit less contention.
48001// If not specified, defaults to true
48002func ResourceScatterNdAddUseLocking(value bool) ResourceScatterNdAddAttr {
48003	return func(m optionalAttr) {
48004		m["use_locking"] = value
48005	}
48006}
48007
48008// Applies sparse addition to individual values or slices in a Variable.
48009//
48010// `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
48011//
48012// `indices` must be integer tensor, containing indices into `ref`.
48013// It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
48014//
48015// The innermost dimension of `indices` (with length `K`) corresponds to
48016// indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
48017// dimension of `ref`.
48018//
48019// `updates` is `Tensor` of rank `Q-1+P-K` with shape:
48020//
48021// ```
48022// [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]
48023// ```
48024//
48025// For example, say we want to add 4 scattered elements to a rank-1 tensor
48026// with 8 elements. In Python, that addition would look like this:
48027//
48028// ```python
48029// ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8], use_resource=True)
48030// indices = tf.constant([[4], [3], [1], [7]])
48031// updates = tf.constant([9, 10, 11, 12])
48032// add = tf.scatter_nd_add(ref, indices, updates)
48033// with tf.Session() as sess:
48034//   print(sess.run(add))
48035// ```
48036//
48037// The resulting update to ref would look like this:
48038//
48039//     [1, 13, 3, 14, 14, 6, 7, 20]
48040//
48041// See `tf.scatter_nd` for more details about how to make updates to
48042// slices.
48043//
48044// Arguments:
48045//	ref: A resource handle. Must be from a VarHandleOp.
48046//	indices: A Tensor. Must be one of the following types: int32, int64.
48047// A tensor of indices into ref.
48048//	updates: A Tensor. Must have the same type as ref. A tensor of
48049// values to add to ref.
48050//
48051// Returns the created operation.
48052func ResourceScatterNdAdd(scope *Scope, ref tf.Output, indices tf.Output, updates tf.Output, optional ...ResourceScatterNdAddAttr) (o *tf.Operation) {
48053	if scope.Err() != nil {
48054		return
48055	}
48056	attrs := map[string]interface{}{}
48057	for _, a := range optional {
48058		a(attrs)
48059	}
48060	opspec := tf.OpSpec{
48061		Type: "ResourceScatterNdAdd",
48062		Input: []tf.Input{
48063			ref, indices, updates,
48064		},
48065		Attrs: attrs,
48066	}
48067	return scope.AddOperation(opspec)
48068}
48069
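// The rank-1 case from the example above, written out in plain Go
// (illustrative only):
//
// ```
// package main
//
// import "fmt"
//
// func scatterNdAdd1D(ref []int, indices [][1]int, updates []int) {
// 	for i, ix := range indices {
// 		ref[ix[0]] += updates[i]
// 	}
// }
//
// func main() {
// 	ref := []int{1, 2, 3, 4, 5, 6, 7, 8}
// 	scatterNdAdd1D(ref, [][1]int{{4}, {3}, {1}, {7}}, []int{9, 10, 11, 12})
// 	fmt.Println(ref) // [1 13 3 14 14 6 7 20]
// }
// ```
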
48070// Computes the mean along segments of a tensor.
48071//
48072// Read
48073// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)
48074// for an explanation of segments.
48075//
48076// Computes a tensor such that
48077// \\(output_i = \frac{\sum_j data_j}{N}\\) where `mean` is
48078// over `j` such that `segment_ids[j] == i` and `N` is the total number of
48079// values summed.
48080//
48081// If the mean is empty for a given segment ID `i`, `output[i] = 0`.
48082//
48083// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
48084// <img style="width:100%" src="https://www.tensorflow.org/images/SegmentMean.png" alt>
48085// </div>
48086//
48087// For example:
48088//
48089// ```
48090// c = tf.constant([[1.0,2,3,4], [4, 3, 2, 1], [5,6,7,8]])
48091// tf.segment_mean(c, tf.constant([0, 0, 1]))
48092// # ==> [[2.5, 2.5, 2.5, 2.5],
48093// #      [5, 6, 7, 8]]
48094// ```
48095//
48096//
48097// Arguments:
48098//
48099//	segment_ids: A 1-D tensor whose size is equal to the size of `data`'s
48100// first dimension.  Values should be sorted and can be repeated.
48101//
48102// Returns Has same shape as data, except for dimension 0 which
48103// has size `k`, the number of segments.
48104func SegmentMean(scope *Scope, data tf.Output, segment_ids tf.Output) (output tf.Output) {
48105	if scope.Err() != nil {
48106		return
48107	}
48108	opspec := tf.OpSpec{
48109		Type: "SegmentMean",
48110		Input: []tf.Input{
48111			data, segment_ids,
48112		},
48113	}
48114	op := scope.AddOperation(opspec)
48115	return op.Output(0)
48116}
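
// A minimal Go usage sketch mirroring the Python example above (the
// op.NewScope / tf.NewSession plumbing is assumed typical client usage;
// error handling elided):
//
// ```go
// s := op.NewScope()
// c := op.Const(s.SubScope("c"), [][]float32{{1, 2, 3, 4}, {4, 3, 2, 1}, {5, 6, 7, 8}})
// ids := op.Const(s.SubScope("ids"), []int32{0, 0, 1})
// mean := op.SegmentMean(s, c, ids)
// graph, _ := s.Finalize()
// sess, _ := tf.NewSession(graph, nil)
// out, _ := sess.Run(nil, []tf.Output{mean}, nil)
// fmt.Println(out[0].Value()) // [[2.5 2.5 2.5 2.5] [5 6 7 8]]
// ```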

// CTCLossV2Attr is an optional argument to CTCLossV2.
type CTCLossV2Attr func(optionalAttr)

// CTCLossV2PreprocessCollapseRepeated sets the optional preprocess_collapse_repeated attribute to value.
//
// value: Scalar, if true then repeated labels are
// collapsed prior to the CTC calculation.
// If not specified, defaults to false
func CTCLossV2PreprocessCollapseRepeated(value bool) CTCLossV2Attr {
	return func(m optionalAttr) {
		m["preprocess_collapse_repeated"] = value
	}
}

// CTCLossV2CtcMergeRepeated sets the optional ctc_merge_repeated attribute to value.
//
// value: Scalar.  If set to false, *during* CTC calculation
// repeated non-blank labels will not be merged and are interpreted as
// individual labels.  This is a simplified version of CTC.
// If not specified, defaults to true
func CTCLossV2CtcMergeRepeated(value bool) CTCLossV2Attr {
	return func(m optionalAttr) {
		m["ctc_merge_repeated"] = value
	}
}

// CTCLossV2IgnoreLongerOutputsThanInputs sets the optional ignore_longer_outputs_than_inputs attribute to value.
//
// value: Scalar. If set to true, during CTC
// calculation, items that have longer output sequences than input sequences
// are skipped: they don't contribute to the loss term and have zero-gradient.
// If not specified, defaults to false
func CTCLossV2IgnoreLongerOutputsThanInputs(value bool) CTCLossV2Attr {
	return func(m optionalAttr) {
		m["ignore_longer_outputs_than_inputs"] = value
	}
}

// Calculates the CTC Loss (log probability) for each batch entry.  Also calculates
//
// the gradient.  This op performs the softmax operation for you, so inputs
// should be, e.g., linear projections of outputs from an LSTM.
//
// Arguments:
//	inputs: 3-D, shape: `(max_time x batch_size x num_classes)`, the logits. The default blank
// label is 0 rather than num_classes - 1.
//	labels_indices: The indices of a `SparseTensor<int32, 2>`.
// `labels_indices(i, :) == [b, t]` means `labels_values(i)` stores the id for
// `(batch b, time t)`.
//	labels_values: The values (labels) associated with the given batch and time.
//	sequence_length: A vector containing sequence lengths (batch).
//
// Returns:
//	loss: A vector (batch) containing log-probabilities.
//	gradient: The gradient of `loss`.  3-D, shape:
// `(max_time x batch_size x num_classes)`.
func CTCLossV2(scope *Scope, inputs tf.Output, labels_indices tf.Output, labels_values tf.Output, sequence_length tf.Output, optional ...CTCLossV2Attr) (loss tf.Output, gradient tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "CTCLossV2",
		Input: []tf.Input{
			inputs, labels_indices, labels_values, sequence_length,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1)
}

// ResourceSparseApplyKerasMomentumAttr is an optional argument to ResourceSparseApplyKerasMomentum.
type ResourceSparseApplyKerasMomentumAttr func(optionalAttr)

// ResourceSparseApplyKerasMomentumUseLocking sets the optional use_locking attribute to value.
//
// value: If `True`, updating of the var and accum tensors will be protected
// by a lock; otherwise the behavior is undefined, but may exhibit less
// contention.
// If not specified, defaults to false
func ResourceSparseApplyKerasMomentumUseLocking(value bool) ResourceSparseApplyKerasMomentumAttr {
	return func(m optionalAttr) {
		m["use_locking"] = value
	}
}

// ResourceSparseApplyKerasMomentumUseNesterov sets the optional use_nesterov attribute to value.
//
// value: If `True`, the tensor passed to compute grad will be
// var + momentum * accum, so in the end, the var you get is actually
// var + momentum * accum.
// If not specified, defaults to false
func ResourceSparseApplyKerasMomentumUseNesterov(value bool) ResourceSparseApplyKerasMomentumAttr {
	return func(m optionalAttr) {
		m["use_nesterov"] = value
	}
}

// Update relevant entries in '*var' and '*accum' according to the momentum scheme.
//
// Set use_nesterov = True if you want to use Nesterov momentum.
//
// That is, for the rows we have grad for, var and accum are updated as follows:
//
// accum = accum * momentum - lr * grad
// var += accum
//
// Arguments:
//	var_: Should be from a Variable().
//	accum: Should be from a Variable().
//	lr: Learning rate. Must be a scalar.
//	grad: The gradient.
//	indices: A vector of indices into the first dimension of var and accum.
//	momentum: Momentum. Must be a scalar.
//
// Returns the created operation.
func ResourceSparseApplyKerasMomentum(scope *Scope, var_ tf.Output, accum tf.Output, lr tf.Output, grad tf.Output, indices tf.Output, momentum tf.Output, optional ...ResourceSparseApplyKerasMomentumAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ResourceSparseApplyKerasMomentum",
		Input: []tf.Input{
			var_, accum, lr, grad, indices, momentum,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}
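
// To make the update rule above concrete, here it is as plain Go arithmetic
// for a single updated row (an illustrative sketch only; the numbers and
// variable names are made up and this is not part of the generated API):
//
// ```go
// momentum, lr := float32(0.9), float32(0.1)
// accum, v, grad := float32(1.0), float32(5.0), float32(2.0)
// accum = accum*momentum - lr*grad // 0.9 - 0.2 = 0.7
// v += accum                       // 5.0 + 0.7 = 5.7
// // With use_nesterov = true, the gradient would instead be evaluated at
// // v + momentum*accum rather than at v.
// ```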

// Returns the result of a TPU compilation.
//
// This operation returns the result of a TPU compilation as a serialized
// CompilationResultProto, which holds a status and an error message if an error
// occurred during compilation.
func TPUCompilationResult(scope *Scope) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "TPUCompilationResult",
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Returns element-wise integer closest to x.
//
// If the result is midway between two representable values,
// the even representable value is chosen.
// For example:
//
// ```
// rint(-1.5) ==> -2.0
// rint(0.5000001) ==> 1.0
// rint([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]) ==> [-2., -2., -0., 0., 2., 2., 2.]
// ```
func Rint(scope *Scope, x tf.Output) (y tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Rint",
		Input: []tf.Input{
			x,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
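
// The same round-half-to-even behavior driven from Go, as a small hedged
// sketch (assumes the usual op.NewScope / tf.NewSession client plumbing;
// error handling elided):
//
// ```go
// s := op.NewScope()
// x := op.Const(s, []float32{-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0})
// y := op.Rint(s, x)
// graph, _ := s.Finalize()
// sess, _ := tf.NewSession(graph, nil)
// out, _ := sess.Run(nil, []tf.Output{y}, nil)
// fmt.Println(out[0].Value()) // [-2 -2 -0 0 2 2 2]
// ```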

// ParseSequenceExampleV2Attr is an optional argument to ParseSequenceExampleV2.
type ParseSequenceExampleV2Attr func(optionalAttr)

// ParseSequenceExampleV2NcontextSparse sets the optional Ncontext_sparse attribute to value.
// If not specified, defaults to 0
//
// REQUIRES: value >= 0
func ParseSequenceExampleV2NcontextSparse(value int64) ParseSequenceExampleV2Attr {
	return func(m optionalAttr) {
		m["Ncontext_sparse"] = value
	}
}

// ParseSequenceExampleV2ContextSparseTypes sets the optional context_sparse_types attribute to value.
//
// value: A list of Ncontext_sparse types; the data types of data in
// each context Feature given in context_sparse_keys.
// Currently the ParseSingleSequenceExample supports DT_FLOAT (FloatList),
// DT_INT64 (Int64List), and DT_STRING (BytesList).
// If not specified, defaults to <>
//
// REQUIRES: len(value) >= 0
func ParseSequenceExampleV2ContextSparseTypes(value []tf.DataType) ParseSequenceExampleV2Attr {
	return func(m optionalAttr) {
		m["context_sparse_types"] = value
	}
}

// ParseSequenceExampleV2ContextRaggedValueTypes sets the optional context_ragged_value_types attribute to value.
//
// value: RaggedTensor.value dtypes for the ragged context features.
// If not specified, defaults to <>
//
// REQUIRES: len(value) >= 0
func ParseSequenceExampleV2ContextRaggedValueTypes(value []tf.DataType) ParseSequenceExampleV2Attr {
	return func(m optionalAttr) {
		m["context_ragged_value_types"] = value
	}
}

// ParseSequenceExampleV2ContextRaggedSplitTypes sets the optional context_ragged_split_types attribute to value.
//
// value: RaggedTensor.row_split dtypes for the ragged context features.
// If not specified, defaults to <>
//
// REQUIRES: len(value) >= 0
func ParseSequenceExampleV2ContextRaggedSplitTypes(value []tf.DataType) ParseSequenceExampleV2Attr {
	return func(m optionalAttr) {
		m["context_ragged_split_types"] = value
	}
}

// ParseSequenceExampleV2ContextDenseShapes sets the optional context_dense_shapes attribute to value.
//
// value: A list of Ncontext_dense shapes; the shapes of data in
// each context Feature given in context_dense_keys.
// The number of elements in the Feature corresponding to context_dense_key[j]
// must always equal context_dense_shapes[j].NumEntries().
// The shape of context_dense_values[j] will match context_dense_shapes[j].
// If not specified, defaults to <>
//
// REQUIRES: len(value) >= 0
func ParseSequenceExampleV2ContextDenseShapes(value []tf.Shape) ParseSequenceExampleV2Attr {
	return func(m optionalAttr) {
		m["context_dense_shapes"] = value
	}
}

// ParseSequenceExampleV2NfeatureListSparse sets the optional Nfeature_list_sparse attribute to value.
// If not specified, defaults to 0
//
// REQUIRES: value >= 0
func ParseSequenceExampleV2NfeatureListSparse(value int64) ParseSequenceExampleV2Attr {
	return func(m optionalAttr) {
		m["Nfeature_list_sparse"] = value
	}
}

// ParseSequenceExampleV2NfeatureListDense sets the optional Nfeature_list_dense attribute to value.
// If not specified, defaults to 0
//
// REQUIRES: value >= 0
func ParseSequenceExampleV2NfeatureListDense(value int64) ParseSequenceExampleV2Attr {
	return func(m optionalAttr) {
		m["Nfeature_list_dense"] = value
	}
}

// ParseSequenceExampleV2FeatureListDenseTypes sets the optional feature_list_dense_types attribute to value.
// If not specified, defaults to <>
//
// REQUIRES: len(value) >= 0
func ParseSequenceExampleV2FeatureListDenseTypes(value []tf.DataType) ParseSequenceExampleV2Attr {
	return func(m optionalAttr) {
		m["feature_list_dense_types"] = value
	}
}

// ParseSequenceExampleV2FeatureListSparseTypes sets the optional feature_list_sparse_types attribute to value.
//
// value: A list of Nfeature_list_sparse types; the data types
// of data in each FeatureList given in feature_list_sparse_keys.
// Currently the ParseSingleSequenceExample supports DT_FLOAT (FloatList),
// DT_INT64 (Int64List), and DT_STRING (BytesList).
// If not specified, defaults to <>
//
// REQUIRES: len(value) >= 0
func ParseSequenceExampleV2FeatureListSparseTypes(value []tf.DataType) ParseSequenceExampleV2Attr {
	return func(m optionalAttr) {
		m["feature_list_sparse_types"] = value
	}
}

// ParseSequenceExampleV2FeatureListRaggedValueTypes sets the optional feature_list_ragged_value_types attribute to value.
//
// value: RaggedTensor.value dtypes for the ragged FeatureList features.
// If not specified, defaults to <>
//
// REQUIRES: len(value) >= 0
func ParseSequenceExampleV2FeatureListRaggedValueTypes(value []tf.DataType) ParseSequenceExampleV2Attr {
	return func(m optionalAttr) {
		m["feature_list_ragged_value_types"] = value
	}
}

// ParseSequenceExampleV2FeatureListRaggedSplitTypes sets the optional feature_list_ragged_split_types attribute to value.
//
// value: RaggedTensor.row_split dtypes for the ragged FeatureList features.
// If not specified, defaults to <>
//
// REQUIRES: len(value) >= 0
func ParseSequenceExampleV2FeatureListRaggedSplitTypes(value []tf.DataType) ParseSequenceExampleV2Attr {
	return func(m optionalAttr) {
		m["feature_list_ragged_split_types"] = value
	}
}

// ParseSequenceExampleV2FeatureListDenseShapes sets the optional feature_list_dense_shapes attribute to value.
//
// value: A list of Nfeature_list_dense shapes; the shapes of
// data in each FeatureList given in feature_list_dense_keys.
// The shape of each Feature in the FeatureList corresponding to
// feature_list_dense_key[j] must always equal
// feature_list_dense_shapes[j].NumEntries().
// If not specified, defaults to <>
//
// REQUIRES: len(value) >= 0
func ParseSequenceExampleV2FeatureListDenseShapes(value []tf.Shape) ParseSequenceExampleV2Attr {
	return func(m optionalAttr) {
		m["feature_list_dense_shapes"] = value
	}
}

// Transforms a vector of tf.io.SequenceExample protos (as strings) into
// typed tensors.
//
// Arguments:
//	serialized: A scalar or vector containing binary serialized SequenceExample protos.
//	debug_name: A scalar or vector containing the names of the serialized protos.
// May contain, for example, table key (descriptive) name for the
// corresponding serialized proto.  This is purely useful for debugging
// purposes, and the presence of values here has no effect on the output.
// May also be an empty vector if no name is available.
//	context_sparse_keys: The keys expected in the Examples' features associated with context_sparse
// values.
//	context_dense_keys: The keys expected in the SequenceExamples' context features associated with
// dense values.
//	context_ragged_keys: The keys expected in the Examples' features associated with context_ragged
// values.
//	feature_list_sparse_keys: The keys expected in the FeatureLists associated with sparse values.
//	feature_list_dense_keys: The keys expected in the SequenceExamples' feature_lists associated
// with lists of dense values.
//	feature_list_ragged_keys: The keys expected in the FeatureLists associated with ragged values.
//	feature_list_dense_missing_assumed_empty: A vector corresponding 1:1 with feature_list_dense_keys, indicating which
// features may be missing from the SequenceExamples.  If the associated
// FeatureList is missing, it is treated as empty.
//	context_dense_defaults: A list of Ncontext_dense Tensors (some may be empty).
// context_dense_defaults[j] provides default values
// when the SequenceExample's context map lacks context_dense_key[j].
// If an empty Tensor is provided for context_dense_defaults[j],
// then the Feature context_dense_keys[j] is required.
// The input type is inferred from context_dense_defaults[j], even when it's
// empty.  If context_dense_defaults[j] is not empty, its shape must match
// context_dense_shapes[j].
func ParseSequenceExampleV2(scope *Scope, serialized tf.Output, debug_name tf.Output, context_sparse_keys tf.Output, context_dense_keys tf.Output, context_ragged_keys tf.Output, feature_list_sparse_keys tf.Output, feature_list_dense_keys tf.Output, feature_list_ragged_keys tf.Output, feature_list_dense_missing_assumed_empty tf.Output, context_dense_defaults []tf.Output, optional ...ParseSequenceExampleV2Attr) (context_sparse_indices []tf.Output, context_sparse_values []tf.Output, context_sparse_shapes []tf.Output, context_dense_values []tf.Output, context_ragged_values []tf.Output, context_ragged_row_splits []tf.Output, feature_list_sparse_indices []tf.Output, feature_list_sparse_values []tf.Output, feature_list_sparse_shapes []tf.Output, feature_list_dense_values []tf.Output, feature_list_dense_lengths []tf.Output, feature_list_ragged_values []tf.Output, feature_list_ragged_outer_splits []tf.Output, feature_list_ragged_inner_splits []tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ParseSequenceExampleV2",
		Input: []tf.Input{
			serialized, debug_name, context_sparse_keys, context_dense_keys, context_ragged_keys, feature_list_sparse_keys, feature_list_dense_keys, feature_list_ragged_keys, feature_list_dense_missing_assumed_empty, tf.OutputList(context_dense_defaults),
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	if scope.Err() != nil {
		return
	}
	var idx int
	var err error
	if context_sparse_indices, idx, err = makeOutputList(op, idx, "context_sparse_indices"); err != nil {
		scope.UpdateErr("ParseSequenceExampleV2", err)
		return
	}
	if context_sparse_values, idx, err = makeOutputList(op, idx, "context_sparse_values"); err != nil {
		scope.UpdateErr("ParseSequenceExampleV2", err)
		return
	}
	if context_sparse_shapes, idx, err = makeOutputList(op, idx, "context_sparse_shapes"); err != nil {
		scope.UpdateErr("ParseSequenceExampleV2", err)
		return
	}
	if context_dense_values, idx, err = makeOutputList(op, idx, "context_dense_values"); err != nil {
		scope.UpdateErr("ParseSequenceExampleV2", err)
		return
	}
	if context_ragged_values, idx, err = makeOutputList(op, idx, "context_ragged_values"); err != nil {
		scope.UpdateErr("ParseSequenceExampleV2", err)
		return
	}
	if context_ragged_row_splits, idx, err = makeOutputList(op, idx, "context_ragged_row_splits"); err != nil {
		scope.UpdateErr("ParseSequenceExampleV2", err)
		return
	}
	if feature_list_sparse_indices, idx, err = makeOutputList(op, idx, "feature_list_sparse_indices"); err != nil {
		scope.UpdateErr("ParseSequenceExampleV2", err)
		return
	}
	if feature_list_sparse_values, idx, err = makeOutputList(op, idx, "feature_list_sparse_values"); err != nil {
		scope.UpdateErr("ParseSequenceExampleV2", err)
		return
	}
	if feature_list_sparse_shapes, idx, err = makeOutputList(op, idx, "feature_list_sparse_shapes"); err != nil {
		scope.UpdateErr("ParseSequenceExampleV2", err)
		return
	}
	if feature_list_dense_values, idx, err = makeOutputList(op, idx, "feature_list_dense_values"); err != nil {
		scope.UpdateErr("ParseSequenceExampleV2", err)
		return
	}
	if feature_list_dense_lengths, idx, err = makeOutputList(op, idx, "feature_list_dense_lengths"); err != nil {
		scope.UpdateErr("ParseSequenceExampleV2", err)
		return
	}
	if feature_list_ragged_values, idx, err = makeOutputList(op, idx, "feature_list_ragged_values"); err != nil {
		scope.UpdateErr("ParseSequenceExampleV2", err)
		return
	}
	if feature_list_ragged_outer_splits, idx, err = makeOutputList(op, idx, "feature_list_ragged_outer_splits"); err != nil {
		scope.UpdateErr("ParseSequenceExampleV2", err)
		return
	}
	if feature_list_ragged_inner_splits, idx, err = makeOutputList(op, idx, "feature_list_ragged_inner_splits"); err != nil {
		scope.UpdateErr("ParseSequenceExampleV2", err)
		return
	}
	return context_sparse_indices, context_sparse_values, context_sparse_shapes, context_dense_values, context_ragged_values, context_ragged_row_splits, feature_list_sparse_indices, feature_list_sparse_values, feature_list_sparse_shapes, feature_list_dense_values, feature_list_dense_lengths, feature_list_ragged_values, feature_list_ragged_outer_splits, feature_list_ragged_inner_splits
}

// Reverses specific dimensions of a tensor.
//
// Given a `tensor`, and a `bool` tensor `dims` representing the dimensions
// of `tensor`, this operation reverses each dimension i of `tensor` where
// `dims[i]` is `True`.
//
// `tensor` can have up to 8 dimensions. The number of dimensions
// of `tensor` must equal the number of elements in `dims`. In other words:
//
// `rank(tensor) = size(dims)`
//
// For example:
//
// ```
// # tensor 't' is [[[[ 0,  1,  2,  3],
// #                  [ 4,  5,  6,  7],
// #                  [ 8,  9, 10, 11]],
// #                 [[12, 13, 14, 15],
// #                  [16, 17, 18, 19],
// #                  [20, 21, 22, 23]]]]
// # tensor 't' shape is [1, 2, 3, 4]
//
// # 'dims' is [False, False, False, True]
// reverse(t, dims) ==> [[[[ 3,  2,  1,  0],
//                         [ 7,  6,  5,  4],
//                         [11, 10,  9,  8]],
//                        [[15, 14, 13, 12],
//                         [19, 18, 17, 16],
//                         [23, 22, 21, 20]]]]
//
// # 'dims' is [False, True, False, False]
// reverse(t, dims) ==> [[[[12, 13, 14, 15],
//                         [16, 17, 18, 19],
//                         [20, 21, 22, 23]],
//                        [[ 0,  1,  2,  3],
//                         [ 4,  5,  6,  7],
//                         [ 8,  9, 10, 11]]]]
//
// # 'dims' is [False, False, True, False]
// reverse(t, dims) ==> [[[[ 8,  9, 10, 11],
//                         [ 4,  5,  6,  7],
//                         [ 0,  1,  2,  3]],
//                        [[20, 21, 22, 23],
//                         [16, 17, 18, 19],
//                         [12, 13, 14, 15]]]]
// ```
//
// Arguments:
//	tensor: Up to 8-D.
//	dims: 1-D. The dimensions to reverse.
//
// Returns The same shape as `tensor`.
func Reverse(scope *Scope, tensor tf.Output, dims tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Reverse",
		Input: []tf.Input{
			tensor, dims,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
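
// A minimal hedged Go sketch of the same op on a 2-D tensor (the
// op.NewScope / tf.NewSession plumbing is assumed typical client usage;
// error handling elided):
//
// ```go
// s := op.NewScope()
// t := op.Const(s.SubScope("t"), [][]int32{{1, 2, 3}, {4, 5, 6}})
// dims := op.Const(s.SubScope("dims"), []bool{false, true})
// r := op.Reverse(s, t, dims)
// graph, _ := s.Finalize()
// sess, _ := tf.NewSession(graph, nil)
// out, _ := sess.Run(nil, []tf.Output{r}, nil)
// fmt.Println(out[0].Value()) // [[3 2 1] [6 5 4]]
// ```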

// Wraps an arbitrary MLIR computation expressed as a module with a main() function.
//
// This operation does not have an associated kernel and is not intended to be
// executed in a regular TensorFlow session. Instead it is intended to be used for
// testing or for special cases where a user intends to pass a custom MLIR computation
// through a TensorFlow graph with the intent of having custom tooling process it
// downstream (when targeting a different environment, like TensorFlow Lite for
// example).
// The MLIR module is expected to have a main() function that will be used as an
// entry point. The inputs to the operation will be passed as arguments to the
// main() function, and the values returned by the main() function are mapped to
// the outputs.
// Example usage:
//
// ```
// import tensorflow as tf
// from tensorflow.compiler.mlir.tensorflow.gen_mlir_passthrough_op import mlir_passthrough_op
//
// mlir_module = '''
// func @main(%arg0 : tensor<10xf32>, %arg1 : tensor<10xf32>) -> tensor<10x10xf32> {
//    %add = "magic.op"(%arg0, %arg1) : (tensor<10xf32>, tensor<10xf32>) -> tensor<10x10xf32>
//    return %add : tensor<10x10xf32>
// }
// '''
//
// @tf.function
// def foo(x, y):
//   return mlir_passthrough_op([x, y], mlir_module, Toutputs=[tf.float32])
//
// graph_def = foo.get_concrete_function(tf.TensorSpec([10], tf.float32), tf.TensorSpec([10], tf.float32)).graph.as_graph_def()
// ```
func MlirPassthroughOp(scope *Scope, inputs []tf.Output, mlir_module string, Toutputs []tf.DataType) (outputs []tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"mlir_module": mlir_module, "Toutputs": Toutputs}
	opspec := tf.OpSpec{
		Type: "MlirPassthroughOp",
		Input: []tf.Input{
			tf.OutputList(inputs),
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	if scope.Err() != nil {
		return
	}
	var idx int
	var err error
	if outputs, idx, err = makeOutputList(op, idx, "outputs"); err != nil {
		scope.UpdateErr("MlirPassthroughOp", err)
		return
	}
	return outputs
}

// StringLowerAttr is an optional argument to StringLower.
type StringLowerAttr func(optionalAttr)

// StringLowerEncoding sets the optional encoding attribute to value.
// If not specified, defaults to ""
func StringLowerEncoding(value string) StringLowerAttr {
	return func(m optionalAttr) {
		m["encoding"] = value
	}
}

// Converts all uppercase characters into their respective lowercase replacements.
//
// Example:
//
// >>> tf.strings.lower("CamelCase string and ALL CAPS")
// <tf.Tensor: shape=(), dtype=string, numpy=b'camelcase string and all caps'>
//
func StringLower(scope *Scope, input tf.Output, optional ...StringLowerAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "StringLower",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
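
// The equivalent call from Go, as a hedged sketch (assumed client plumbing;
// error handling elided):
//
// ```go
// s := op.NewScope()
// in := op.Const(s, "CamelCase string and ALL CAPS")
// lower := op.StringLower(s, in)
// graph, _ := s.Finalize()
// sess, _ := tf.NewSession(graph, nil)
// out, _ := sess.Run(nil, []tf.Output{lower}, nil)
// fmt.Println(out[0].Value()) // "camelcase string and all caps"
// ```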

// EnqueueTPUEmbeddingSparseTensorBatchAttr is an optional argument to EnqueueTPUEmbeddingSparseTensorBatch.
type EnqueueTPUEmbeddingSparseTensorBatchAttr func(optionalAttr)

// EnqueueTPUEmbeddingSparseTensorBatchDeviceOrdinal sets the optional device_ordinal attribute to value.
//
// value: The TPU device to use. Should be >= 0 and less than the number
// of TPU cores in the task on which the node is placed.
// If not specified, defaults to -1
func EnqueueTPUEmbeddingSparseTensorBatchDeviceOrdinal(value int64) EnqueueTPUEmbeddingSparseTensorBatchAttr {
	return func(m optionalAttr) {
		m["device_ordinal"] = value
	}
}

// EnqueueTPUEmbeddingSparseTensorBatchCombiners sets the optional combiners attribute to value.
//
// value: A list of string scalars, one for each embedding table, specifying
// how to normalize the embedding activations after weighted summation.
// Supported combiners are 'mean', 'sum', or 'sqrtn'. It is invalid to have
// the sum of the weights be 0 for 'mean' or the sum of the squared weights be
// 0 for 'sqrtn'. If combiners isn't passed, the default is to use 'sum' for
// all tables.
// If not specified, defaults to <>
func EnqueueTPUEmbeddingSparseTensorBatchCombiners(value []string) EnqueueTPUEmbeddingSparseTensorBatchAttr {
	return func(m optionalAttr) {
		m["combiners"] = value
	}
}

// EnqueueTPUEmbeddingSparseTensorBatchMaxSequenceLengths sets the optional max_sequence_lengths attribute to value.
// If not specified, defaults to <>
func EnqueueTPUEmbeddingSparseTensorBatchMaxSequenceLengths(value []int64) EnqueueTPUEmbeddingSparseTensorBatchAttr {
	return func(m optionalAttr) {
		m["max_sequence_lengths"] = value
	}
}

// EnqueueTPUEmbeddingSparseTensorBatchNumFeatures sets the optional num_features attribute to value.
// If not specified, defaults to <>
func EnqueueTPUEmbeddingSparseTensorBatchNumFeatures(value []int64) EnqueueTPUEmbeddingSparseTensorBatchAttr {
	return func(m optionalAttr) {
		m["num_features"] = value
	}
}

// Eases the porting of code that uses tf.nn.embedding_lookup_sparse().
//
// sample_indices[i], embedding_indices[i] and aggregation_weights[i] correspond
// to the ith feature. table_ids[i] indicates which embedding table to look up
// for the ith feature.
//
// The tensors at corresponding positions in the three input lists (sample_indices,
// embedding_indices and aggregation_weights) must have the same shape, i.e. rank 1
// with dim_size() equal to the total number of lookups into the table described by
// the corresponding feature.
//
// Arguments:
//	sample_indices: A list of rank 1 Tensors specifying the training example to
// which the corresponding embedding_indices and aggregation_weights values
// belong. It corresponds to sp_ids.indices[:,0] in embedding_lookup_sparse().
//	embedding_indices: A list of rank 1 Tensors, indices into the embedding tables.
// It corresponds to sp_ids.values in embedding_lookup_sparse().
//	aggregation_weights: A list of rank 1 Tensors containing per training example
// aggregation weights. It corresponds to sp_weights.values in
// embedding_lookup_sparse().
//	mode_override: A string input that overrides the mode specified in the
// TPUEmbeddingConfiguration. Supported values are {'unspecified', 'inference',
// 'training', 'backward_pass_only'}. When set to 'unspecified', the mode set
// in TPUEmbeddingConfiguration is used, otherwise mode_override is used.
//	table_ids: A list of integers specifying the identifier of the embedding table
// (offset of TableDescriptor in the TPUEmbeddingConfiguration) to lookup the
// corresponding input. The ith input is looked up using table_ids[i]. The size
// of the table_ids list must be equal to that of sample_indices,
// embedding_indices and aggregation_weights.
//
// Returns the created operation.
func EnqueueTPUEmbeddingSparseTensorBatch(scope *Scope, sample_indices []tf.Output, embedding_indices []tf.Output, aggregation_weights []tf.Output, mode_override tf.Output, table_ids []int64, optional ...EnqueueTPUEmbeddingSparseTensorBatchAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"table_ids": table_ids}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "EnqueueTPUEmbeddingSparseTensorBatch",
		Input: []tf.Input{
			tf.OutputList(sample_indices), tf.OutputList(embedding_indices), tf.OutputList(aggregation_weights), mode_override,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}

// ReverseSequenceAttr is an optional argument to ReverseSequence.
type ReverseSequenceAttr func(optionalAttr)

// ReverseSequenceBatchDim sets the optional batch_dim attribute to value.
//
// value: The dimension along which reversal is performed.
// If not specified, defaults to 0
func ReverseSequenceBatchDim(value int64) ReverseSequenceAttr {
	return func(m optionalAttr) {
		m["batch_dim"] = value
	}
}

// Reverses variable length slices.
//
// This op first slices `input` along the dimension `batch_dim`, and for each
// slice `i`, reverses the first `seq_lengths[i]` elements along
// the dimension `seq_dim`.
//
// The elements of `seq_lengths` must obey `seq_lengths[i] <= input.dims[seq_dim]`,
// and `seq_lengths` must be a vector of length `input.dims[batch_dim]`.
//
// The output slice `i` along dimension `batch_dim` is then given by input
// slice `i`, with the first `seq_lengths[i]` slices along dimension
// `seq_dim` reversed.
//
// For example:
//
// ```
// # Given this:
// batch_dim = 0
// seq_dim = 1
// input.dims = (4, 8, ...)
// seq_lengths = [7, 2, 3, 5]
//
// # then slices of input are reversed on seq_dim, but only up to seq_lengths:
// output[0, 0:7, :, ...] = input[0, 7:0:-1, :, ...]
// output[1, 0:2, :, ...] = input[1, 2:0:-1, :, ...]
// output[2, 0:3, :, ...] = input[2, 3:0:-1, :, ...]
// output[3, 0:5, :, ...] = input[3, 5:0:-1, :, ...]
//
// # while entries past seq_lens are copied through:
// output[0, 7:, :, ...] = input[0, 7:, :, ...]
// output[1, 2:, :, ...] = input[1, 2:, :, ...]
// output[2, 3:, :, ...] = input[2, 3:, :, ...]
// output[3, 5:, :, ...] = input[3, 5:, :, ...]
// ```
//
// In contrast, if:
//
// ```
// # Given this:
// batch_dim = 2
// seq_dim = 0
// input.dims = (8, ?, 4, ...)
// seq_lengths = [7, 2, 3, 5]
//
// # then slices of input are reversed on seq_dim, but only up to seq_lengths:
// output[0:7, :, 0, :, ...] = input[7:0:-1, :, 0, :, ...]
// output[0:2, :, 1, :, ...] = input[2:0:-1, :, 1, :, ...]
// output[0:3, :, 2, :, ...] = input[3:0:-1, :, 2, :, ...]
// output[0:5, :, 3, :, ...] = input[5:0:-1, :, 3, :, ...]
//
// # while entries past seq_lens are copied through:
// output[7:, :, 0, :, ...] = input[7:, :, 0, :, ...]
// output[2:, :, 1, :, ...] = input[2:, :, 1, :, ...]
// output[3:, :, 2, :, ...] = input[3:, :, 2, :, ...]
// output[5:, :, 3, :, ...] = input[5:, :, 3, :, ...]
// ```
//
// Arguments:
//	input: The input to reverse.
//	seq_lengths: 1-D with length `input.dims(batch_dim)` and
// `max(seq_lengths) <= input.dims(seq_dim)`
//	seq_dim: The dimension which is partially reversed.
//
// Returns The partially reversed input. It has the same shape as `input`.
func ReverseSequence(scope *Scope, input tf.Output, seq_lengths tf.Output, seq_dim int64, optional ...ReverseSequenceAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"seq_dim": seq_dim}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ReverseSequence",
		Input: []tf.Input{
			input, seq_lengths,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
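
// A small hedged Go sketch: reverse the first seq_lengths[i] elements of
// each row along seq_dim = 1 (assumed client plumbing; error handling
// elided):
//
// ```go
// s := op.NewScope()
// input := op.Const(s.SubScope("in"), [][]int32{{1, 2, 3, 4}, {5, 6, 7, 8}})
// seqLens := op.Const(s.SubScope("len"), []int64{3, 2})
// r := op.ReverseSequence(s, input, seqLens, 1) // seq_dim = 1, batch_dim defaults to 0
// graph, _ := s.Finalize()
// sess, _ := tf.NewSession(graph, nil)
// out, _ := sess.Run(nil, []tf.Output{r}, nil)
// fmt.Println(out[0].Value()) // [[3 2 1 4] [6 5 7 8]]
// ```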

// DataServiceDatasetV2Attr is an optional argument to DataServiceDatasetV2.
type DataServiceDatasetV2Attr func(optionalAttr)

// DataServiceDatasetV2TaskRefreshIntervalHintMs sets the optional task_refresh_interval_hint_ms attribute to value.
// If not specified, defaults to -1
func DataServiceDatasetV2TaskRefreshIntervalHintMs(value int64) DataServiceDatasetV2Attr {
	return func(m optionalAttr) {
		m["task_refresh_interval_hint_ms"] = value
	}
}

// DataServiceDatasetV2DataTransferProtocol sets the optional data_transfer_protocol attribute to value.
// If not specified, defaults to ""
func DataServiceDatasetV2DataTransferProtocol(value string) DataServiceDatasetV2Attr {
	return func(m optionalAttr) {
		m["data_transfer_protocol"] = value
	}
}

// Creates a dataset that reads data from the tf.data service.
func DataServiceDatasetV2(scope *Scope, dataset_id tf.Output, processing_mode tf.Output, address tf.Output, protocol tf.Output, job_name tf.Output, consumer_index tf.Output, num_consumers tf.Output, max_outstanding_requests tf.Output, iteration_counter tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...DataServiceDatasetV2Attr) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "DataServiceDatasetV2",
		Input: []tf.Input{
			dataset_id, processing_mode, address, protocol, job_name, consumer_index, num_consumers, max_outstanding_requests, iteration_counter,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Fetches multiple values from infeed as an XLA tuple.
//
// Arguments:
//	dtypes: The element types of each element in `outputs`.
//	shapes: The shapes of each tensor in `outputs`.
//
// Returns A list of tensors that will be provided using the infeed mechanism.
func InfeedDequeueTuple(scope *Scope, dtypes []tf.DataType, shapes []tf.Shape) (outputs []tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"dtypes": dtypes, "shapes": shapes}
	opspec := tf.OpSpec{
		Type: "InfeedDequeueTuple",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	if scope.Err() != nil {
		return
	}
	var idx int
	var err error
	if outputs, idx, err = makeOutputList(op, idx, "outputs"); err != nil {
		scope.UpdateErr("InfeedDequeueTuple", err)
		return
	}
	return outputs
}

// Serializes the tree ensemble to a proto.
//
// Arguments:
//	tree_ensemble_handle: Handle to the tree ensemble.
//
// Returns:
//	stamp_token: Stamp token of the tree ensemble resource.
//	tree_ensemble_serialized: Serialized proto of the ensemble.
func BoostedTreesSerializeEnsemble(scope *Scope, tree_ensemble_handle tf.Output) (stamp_token tf.Output, tree_ensemble_serialized tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "BoostedTreesSerializeEnsemble",
		Input: []tf.Input{
			tree_ensemble_handle,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1)
}

// Computes inverse hyperbolic cosine of x element-wise.
//
// Given an input tensor, the function computes the inverse hyperbolic cosine of every element.
// Input range is `[1, inf]`. It returns `nan` if the input lies outside the range.
//
// ```python
// x = tf.constant([-2, -0.5, 1, 1.2, 200, 10000, float("inf")])
// tf.math.acosh(x) ==> [nan nan 0. 0.62236255 5.9914584 9.903487 inf]
// ```
func Acosh(scope *Scope, x tf.Output) (y tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Acosh",
		Input: []tf.Input{
			x,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
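
// Equivalently, acosh(x) = ln(x + sqrt(x*x - 1)) for x >= 1. A hedged Go
// sketch of calling the op itself (assumed client plumbing; error handling
// elided):
//
// ```go
// s := op.NewScope()
// x := op.Const(s, []float32{1, 1.2, 200})
// y := op.Acosh(s, x)
// graph, _ := s.Finalize()
// sess, _ := tf.NewSession(graph, nil)
// out, _ := sess.Run(nil, []tf.Output{y}, nil)
// fmt.Println(out[0].Value()) // approximately [0 0.62236255 5.9914584]
// ```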

// Outputs deterministic pseudorandom numbers from a gamma distribution.
//
// Outputs random values from a gamma distribution.
//
// The outputs are a deterministic function of `shape`, `seed`, and `alpha`.
//
// Arguments:
//	shape: The shape of the output tensor.
//	seed: 2 seeds (shape [2]).
//	alpha: The concentration of the gamma distribution. Shape must match the rightmost
// dimensions of `shape`.
//
// Returns Random values with specified shape.
func StatelessRandomGammaV2(scope *Scope, shape tf.Output, seed tf.Output, alpha tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "StatelessRandomGammaV2",
		Input: []tf.Input{
			shape, seed, alpha,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

//   Combines (nests of) input elements into a dataset of (nests of) windows.
//
//   A "window" is a finite dataset of flat elements of size `size` (or possibly
//   fewer if there are not enough input elements to fill the window and
//   `drop_remainder` evaluates to false).
//
//   The `shift` argument determines the number of input elements by which
//   the window moves on each iteration.  The first element in the `k`th window
//   will be element
//
//   ```
//   1 + (k-1) * shift
//   ```
//
//   of the input dataset. In particular, the first element of the first window
//   will always be the first element of the input dataset.
//
//   If the `stride` parameter is greater than 1, then each window will skip
//   `(stride - 1)` input elements between each element that appears in the
//   window. Output windows will still contain `size` elements regardless of
//   the value of `stride`.
//
//   The `stride` argument determines the stride of the input elements, and the
//   `shift` argument determines the shift of the window.
//
//   For example, letting `{...}` represent a Dataset:
//
//   - `tf.data.Dataset.range(7).window(2)` produces
//     `{{0, 1}, {2, 3}, {4, 5}, {6}}`
//   - `tf.data.Dataset.range(7).window(3, 2, 1, True)` produces
//     `{{0, 1, 2}, {2, 3, 4}, {4, 5, 6}}`
//   - `tf.data.Dataset.range(7).window(3, 1, 2, True)` produces
//     `{{0, 2, 4}, {1, 3, 5}, {2, 4, 6}}`
//
//   Note that when the `window` transformation is applied to a dataset of
//   nested elements, it produces a dataset of nested windows.
//
//   For example:
//
//   - `tf.data.Dataset.from_tensor_slices((range(4), range(4))).window(2)`
//     produces `{({0, 1}, {0, 1}), ({2, 3}, {2, 3})}`
//   - `tf.data.Dataset.from_tensor_slices({"a": range(4)}).window(2)`
//     produces `{{"a": {0, 1}}, {"a": {2, 3}}}`
//
// Arguments:
//
//	size: An integer scalar, representing the number of elements
// of the input dataset to combine into a window. Must be positive.
//	shift: An integer scalar, representing the number of input elements
// by which the window moves in each iteration.  Defaults to `size`.
// Must be positive.
//	stride: An integer scalar, representing the stride of the input elements
// in the sliding window. Must be positive. The default value of 1 means
// "retain every input element".
//	drop_remainder: A Boolean scalar, representing whether the last window should be
// dropped if its size is smaller than `size`.
//
//
func WindowDataset(scope *Scope, input_dataset tf.Output, size tf.Output, shift tf.Output, stride tf.Output, drop_remainder tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
	opspec := tf.OpSpec{
		Type: "WindowDataset",
		Input: []tf.Input{
			input_dataset, size, shift, stride, drop_remainder,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// SetSizeAttr is an optional argument to SetSize.
type SetSizeAttr func(optionalAttr)

// SetSizeValidateIndices sets the optional validate_indices attribute to value.
// If not specified, defaults to true
func SetSizeValidateIndices(value bool) SetSizeAttr {
	return func(m optionalAttr) {
		m["validate_indices"] = value
	}
}

// Number of unique elements along last dimension of input `set`.
//
// Input `set` is a `SparseTensor` represented by `set_indices`, `set_values`,
// and `set_shape`. The last dimension contains values in a set; duplicates are
// allowed but ignored.
//
// If `validate_indices` is `True`, this op validates the order and range of `set`
// indices.
//
// Arguments:
//	set_indices: 2D `Tensor`, indices of a `SparseTensor`.
//	set_values: 1D `Tensor`, values of a `SparseTensor`.
//	set_shape: 1D `Tensor`, shape of a `SparseTensor`.
//
// Returns For `set` ranked `n`, this is a `Tensor` with rank `n-1`, and the same 1st
// `n-1` dimensions as `set`. Each value is the number of unique elements in
// the corresponding `[0...n-1]` dimension of `set`.
func SetSize(scope *Scope, set_indices tf.Output, set_values tf.Output, set_shape tf.Output, optional ...SetSizeAttr) (size tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "SetSize",
		Input: []tf.Input{
			set_indices, set_values, set_shape,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
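
// A hedged Go sketch of counting unique set values per row of a sparse
// `[2, 3]` tensor (the constants and expected output are illustrative
// assumptions; assumed client plumbing, error handling elided):
//
// ```go
// s := op.NewScope()
// indices := op.Const(s.SubScope("idx"), [][]int64{{0, 0}, {0, 1}, {0, 2}, {1, 0}})
// values := op.Const(s.SubScope("val"), []int32{1, 1, 3, 5})
// shape := op.Const(s.SubScope("shape"), []int64{2, 3})
// size := op.SetSize(s, indices, values, shape)
// // row 0 holds {1, 1, 3} -> 2 unique values; row 1 holds {5} -> 1,
// // so fetching size should yield [2 1]
// ```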

// LoadTPUEmbeddingRMSPropParametersAttr is an optional argument to LoadTPUEmbeddingRMSPropParameters.
type LoadTPUEmbeddingRMSPropParametersAttr func(optionalAttr)

// LoadTPUEmbeddingRMSPropParametersTableId sets the optional table_id attribute to value.
// If not specified, defaults to -1
func LoadTPUEmbeddingRMSPropParametersTableId(value int64) LoadTPUEmbeddingRMSPropParametersAttr {
	return func(m optionalAttr) {
		m["table_id"] = value
	}
}

// LoadTPUEmbeddingRMSPropParametersTableName sets the optional table_name attribute to value.
// If not specified, defaults to ""
func LoadTPUEmbeddingRMSPropParametersTableName(value string) LoadTPUEmbeddingRMSPropParametersAttr {
	return func(m optionalAttr) {
		m["table_name"] = value
	}
}

// LoadTPUEmbeddingRMSPropParametersConfig sets the optional config attribute to value.
// If not specified, defaults to ""
func LoadTPUEmbeddingRMSPropParametersConfig(value string) LoadTPUEmbeddingRMSPropParametersAttr {
	return func(m optionalAttr) {
		m["config"] = value
	}
}

// Load RMSProp embedding parameters.
//
// An op that loads optimization parameters into HBM for embedding. Must be
// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
// embedding table configuration. For example, this op is used to install
// parameters that are loaded from a checkpoint before a training loop is
// executed.
//
// Arguments:
//	parameters: Value of parameters used in the RMSProp optimization algorithm.
//	ms: Value of ms used in the RMSProp optimization algorithm.
//	mom: Value of mom used in the RMSProp optimization algorithm.
//
//
//
// Returns the created operation.
func LoadTPUEmbeddingRMSPropParameters(scope *Scope, parameters tf.Output, ms tf.Output, mom tf.Output, num_shards int64, shard_id int64, optional ...LoadTPUEmbeddingRMSPropParametersAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "LoadTPUEmbeddingRMSPropParameters",
		Input: []tf.Input{
			parameters, ms, mom,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}

// CollectiveBcastSendV2Attr is an optional argument to CollectiveBcastSendV2.
type CollectiveBcastSendV2Attr func(optionalAttr)

// CollectiveBcastSendV2CommunicationHint sets the optional communication_hint attribute to value.
// If not specified, defaults to "auto"
func CollectiveBcastSendV2CommunicationHint(value string) CollectiveBcastSendV2Attr {
	return func(m optionalAttr) {
		m["communication_hint"] = value
	}
}

// CollectiveBcastSendV2TimeoutSeconds sets the optional timeout_seconds attribute to value.
// If not specified, defaults to 0
func CollectiveBcastSendV2TimeoutSeconds(value float32) CollectiveBcastSendV2Attr {
	return func(m optionalAttr) {
		m["timeout_seconds"] = value
	}
}

// Broadcasts a tensor value to one or more other devices.
func CollectiveBcastSendV2(scope *Scope, input tf.Output, group_size tf.Output, group_key tf.Output, instance_key tf.Output, optional ...CollectiveBcastSendV2Attr) (data tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "CollectiveBcastSendV2",
		Input: []tf.Input{
			input, group_size, group_key, instance_key,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// InfeedEnqueueTupleAttr is an optional argument to InfeedEnqueueTuple.
type InfeedEnqueueTupleAttr func(optionalAttr)

// InfeedEnqueueTupleLayouts sets the optional layouts attribute to value.
//
// value: A vector holding the requested layout in minor-to-major sequence for
// all the tuple shapes, in the order the shapes appear in the "shapes" input.
// The layout elements for a sub-shape can be set to -1, in which case the
// corresponding layout will be computed by the infeed operation.
// If not specified, defaults to <>
func InfeedEnqueueTupleLayouts(value []int64) InfeedEnqueueTupleAttr {
	return func(m optionalAttr) {
		m["layouts"] = value
	}
}

// InfeedEnqueueTupleDeviceOrdinal sets the optional device_ordinal attribute to value.
//
// value: The TPU device to use. This should be -1 when the Op
// is running on a TPU device, and >= 0 when the Op is running on the CPU
// device.
// If not specified, defaults to -1
func InfeedEnqueueTupleDeviceOrdinal(value int64) InfeedEnqueueTupleAttr {
	return func(m optionalAttr) {
		m["device_ordinal"] = value
	}
}

// Feeds multiple Tensor values into the computation as an XLA tuple.
//
// Arguments:
//	inputs: A list of tensors that will be provided using the infeed mechanism.
//	shapes: The shapes of each tensor in `inputs`.
//
// Returns the created operation.
func InfeedEnqueueTuple(scope *Scope, inputs []tf.Output, shapes []tf.Shape, optional ...InfeedEnqueueTupleAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"shapes": shapes}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "InfeedEnqueueTuple",
		Input: []tf.Input{
			tf.OutputList(inputs),
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}

// MapClearAttr is an optional argument to MapClear.
type MapClearAttr func(optionalAttr)

// MapClearCapacity sets the optional capacity attribute to value.
// If not specified, defaults to 0
//
// REQUIRES: value >= 0
func MapClearCapacity(value int64) MapClearAttr {
	return func(m optionalAttr) {
		m["capacity"] = value
	}
}

// MapClearMemoryLimit sets the optional memory_limit attribute to value.
// If not specified, defaults to 0
//
// REQUIRES: value >= 0
func MapClearMemoryLimit(value int64) MapClearAttr {
	return func(m optionalAttr) {
		m["memory_limit"] = value
	}
}

// MapClearContainer sets the optional container attribute to value.
// If not specified, defaults to ""
func MapClearContainer(value string) MapClearAttr {
	return func(m optionalAttr) {
		m["container"] = value
	}
}

// MapClearSharedName sets the optional shared_name attribute to value.
// If not specified, defaults to ""
func MapClearSharedName(value string) MapClearAttr {
	return func(m optionalAttr) {
		m["shared_name"] = value
	}
}

// Op removes all elements in the underlying container.
//
// Returns the created operation.
func MapClear(scope *Scope, dtypes []tf.DataType, optional ...MapClearAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"dtypes": dtypes}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "MapClear",

		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}

// Deserialize `SparseTensor` objects.
//
// The input `serialized_sparse` must have the shape `[?, ?, ..., ?, 3]` where
// the last dimension stores serialized `SparseTensor` objects and the other N
// dimensions (N >= 0) correspond to a batch. The ranks of the original
// `SparseTensor` objects must all match. When the final `SparseTensor` is
// created, its rank is the rank of the incoming `SparseTensor` objects plus N;
// the sparse tensors have been concatenated along new dimensions, one for each
// batch.
//
// The output `SparseTensor` object's shape values for the original dimensions
// are the max across the input `SparseTensor` objects' shape values for the
// corresponding dimensions. The new dimensions match the size of the batch.
//
// The input `SparseTensor` objects' indices are assumed ordered in
// standard lexicographic order.  If this is not the case, after this
// step run `SparseReorder` to restore index ordering.
//
// For example, if the serialized input is a `[2 x 3]` matrix representing two
// original `SparseTensor` objects:
//
//     index = [ 0]
//             [10]
//             [20]
//     values = [1, 2, 3]
//     shape = [50]
//
// and
//
//     index = [ 2]
//             [10]
//     values = [4, 5]
//     shape = [30]
//
// then the final deserialized `SparseTensor` will be:
//
//     index = [0  0]
//             [0 10]
//             [0 20]
//             [1  2]
//             [1 10]
//     values = [1, 2, 3, 4, 5]
//     shape = [2 50]
//
// Arguments:
//	serialized_sparse: The serialized `SparseTensor` objects. The last dimension
// must have 3 columns.
//	dtype: The `dtype` of the serialized `SparseTensor` objects.
func DeserializeSparse(scope *Scope, serialized_sparse tf.Output, dtype tf.DataType) (sparse_indices tf.Output, sparse_values tf.Output, sparse_shape tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"dtype": dtype}
	opspec := tf.OpSpec{
		Type: "DeserializeSparse",
		Input: []tf.Input{
			serialized_sparse,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}
49446
49447// Decode web-safe base64-encoded strings.
49448//
49449// Input may or may not have padding at the end. See EncodeBase64 for padding.
49450// Web-safe means that input must use - and _ instead of + and /.
49451//
49452// Arguments:
49453//	input: Base64 strings to decode.
49454//
49455// Returns Decoded strings.
49456func DecodeBase64(scope *Scope, input tf.Output) (output tf.Output) {
49457	if scope.Err() != nil {
49458		return
49459	}
49460	opspec := tf.OpSpec{
49461		Type: "DecodeBase64",
49462		Input: []tf.Input{
49463			input,
49464		},
49465	}
49466	op := scope.AddOperation(opspec)
49467	return op.Output(0)
49468}
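
// Example (hand-written sketch, not part of the generated API): decode a
// web-safe base64 string constant. "aGVsbG8" is "hello" without padding.
func exampleDecodeBase64(s *Scope) tf.Output {
	encoded := Const(s, []string{"aGVsbG8"})
	return DecodeBase64(s, encoded)
}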
49469
49470// LoadTPUEmbeddingAdagradParametersAttr is an optional argument to LoadTPUEmbeddingAdagradParameters.
49471type LoadTPUEmbeddingAdagradParametersAttr func(optionalAttr)
49472
49473// LoadTPUEmbeddingAdagradParametersTableId sets the optional table_id attribute to value.
49474// If not specified, defaults to -1
49475func LoadTPUEmbeddingAdagradParametersTableId(value int64) LoadTPUEmbeddingAdagradParametersAttr {
49476	return func(m optionalAttr) {
49477		m["table_id"] = value
49478	}
49479}
49480
49481// LoadTPUEmbeddingAdagradParametersTableName sets the optional table_name attribute to value.
49482// If not specified, defaults to ""
49483func LoadTPUEmbeddingAdagradParametersTableName(value string) LoadTPUEmbeddingAdagradParametersAttr {
49484	return func(m optionalAttr) {
49485		m["table_name"] = value
49486	}
49487}
49488
49489// LoadTPUEmbeddingAdagradParametersConfig sets the optional config attribute to value.
49490// If not specified, defaults to ""
49491func LoadTPUEmbeddingAdagradParametersConfig(value string) LoadTPUEmbeddingAdagradParametersAttr {
49492	return func(m optionalAttr) {
49493		m["config"] = value
49494	}
49495}
49496
49497// Load Adagrad embedding parameters.
49498//
49499// An op that loads optimization parameters into HBM for embedding. Must be
49500// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
49501// embedding table configuration. For example, this op is used to install
49502// parameters that are loaded from a checkpoint before a training loop is
49503// executed.
49504//
49505// Arguments:
49506//	parameters: Value of parameters used in the Adagrad optimization algorithm.
49507//	accumulators: Value of accumulators used in the Adagrad optimization algorithm.
49508//
49509//
49510//
49511// Returns the created operation.
49512func LoadTPUEmbeddingAdagradParameters(scope *Scope, parameters tf.Output, accumulators tf.Output, num_shards int64, shard_id int64, optional ...LoadTPUEmbeddingAdagradParametersAttr) (o *tf.Operation) {
49513	if scope.Err() != nil {
49514		return
49515	}
49516	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
49517	for _, a := range optional {
49518		a(attrs)
49519	}
49520	opspec := tf.OpSpec{
49521		Type: "LoadTPUEmbeddingAdagradParameters",
49522		Input: []tf.Input{
49523			parameters, accumulators,
49524		},
49525		Attrs: attrs,
49526	}
49527	return scope.AddOperation(opspec)
49528}
49529
49530// DebugNumericSummaryAttr is an optional argument to DebugNumericSummary.
49531type DebugNumericSummaryAttr func(optionalAttr)
49532
49533// DebugNumericSummaryDeviceName sets the optional device_name attribute to value.
49534// If not specified, defaults to ""
49535func DebugNumericSummaryDeviceName(value string) DebugNumericSummaryAttr {
49536	return func(m optionalAttr) {
49537		m["device_name"] = value
49538	}
49539}
49540
49541// DebugNumericSummaryTensorName sets the optional tensor_name attribute to value.
49542//
49543// value: Name of the input tensor.
49544// If not specified, defaults to ""
49545func DebugNumericSummaryTensorName(value string) DebugNumericSummaryAttr {
49546	return func(m optionalAttr) {
49547		m["tensor_name"] = value
49548	}
49549}
49550
49551// DebugNumericSummaryDebugUrls sets the optional debug_urls attribute to value.
49552//
49553// value: List of URLs to debug targets, e.g.,
//   file:///foo/tfdbg_dump, grpc://localhost:11011.
49555// If not specified, defaults to <>
49556func DebugNumericSummaryDebugUrls(value []string) DebugNumericSummaryAttr {
49557	return func(m optionalAttr) {
49558		m["debug_urls"] = value
49559	}
49560}
49561
49562// DebugNumericSummaryLowerBound sets the optional lower_bound attribute to value.
49563//
// value: (float) The lower bound; values less than or equal to it are
//   included in the generalized -inf count. Default: -inf.
49566// If not specified, defaults to -inf
49567func DebugNumericSummaryLowerBound(value float32) DebugNumericSummaryAttr {
49568	return func(m optionalAttr) {
49569		m["lower_bound"] = value
49570	}
49571}
49572
49573// DebugNumericSummaryUpperBound sets the optional upper_bound attribute to value.
49574//
// value: (float) The upper bound; values greater than or equal to it are
//   included in the generalized +inf count. Default: +inf.
49577// If not specified, defaults to inf
49578func DebugNumericSummaryUpperBound(value float32) DebugNumericSummaryAttr {
49579	return func(m optionalAttr) {
49580		m["upper_bound"] = value
49581	}
49582}
49583
49584// DebugNumericSummaryMuteIfHealthy sets the optional mute_if_healthy attribute to value.
49585//
49586// value: (bool) Do not send data to the debug URLs unless at least one
//   of elements [2], [3] and [7] (i.e., the NaN count and the generalized -inf
//   and +inf counts) is non-zero.
49589// If not specified, defaults to false
49590func DebugNumericSummaryMuteIfHealthy(value bool) DebugNumericSummaryAttr {
49591	return func(m optionalAttr) {
49592		m["mute_if_healthy"] = value
49593	}
49594}
49595
49596// DebugNumericSummaryGatedGrpc sets the optional gated_grpc attribute to value.
49597//
49598// value: Whether this op will be gated. If any of the debug_urls of this
49599//   debug node is of the grpc:// scheme, when the value of this attribute is set
49600//   to True, the data will not actually be sent via the grpc stream unless this
49601//   debug op has been enabled at the debug_url. If all of the debug_urls of this
49602//   debug node are of the grpc:// scheme and the debug op is enabled at none of
49603//   them, the output will be an empty Tensor.
49604// If not specified, defaults to false
49605func DebugNumericSummaryGatedGrpc(value bool) DebugNumericSummaryAttr {
49606	return func(m optionalAttr) {
49607		m["gated_grpc"] = value
49608	}
49609}
49610
49611// Debug Numeric Summary Op.
49612//
49613// Provide a basic summary of numeric value types, range and distribution.
49614//
49615// output: A double tensor of shape [14 + nDimensions], where nDimensions is the
49616//   number of dimensions of the tensor's shape. The elements of output are:
49617//   [0]: is initialized (1.0) or not (0.0).
49618//   [1]: total number of elements
49619//   [2]: NaN element count
49620//   [3]: generalized -inf count: elements <= lower_bound. lower_bound is -inf by
49621//     default.
49622//   [4]: negative element count (excluding -inf), if lower_bound is the default
49623//     -inf. Otherwise, this is the count of elements > lower_bound and < 0.
49624//   [5]: zero element count
49625//   [6]: positive element count (excluding +inf), if upper_bound is the default
49626//     +inf. Otherwise, this is the count of elements < upper_bound and > 0.
//   [7]: generalized +inf count: elements >= upper_bound. upper_bound is +inf by
//     default.
//   (Output elements [1:8] are all zero if the tensor is uninitialized.)
49630//   [8]: minimum of all non-inf and non-NaN elements.
49631//        If uninitialized or no such element exists: +inf.
49632//   [9]: maximum of all non-inf and non-NaN elements.
49633//        If uninitialized or no such element exists: -inf.
49634//   [10]: mean of all non-inf and non-NaN elements.
49635//         If uninitialized or no such element exists: NaN.
49636//   [11]: variance of all non-inf and non-NaN elements.
49637//         If uninitialized or no such element exists: NaN.
49638//   [12]: Data type of the tensor encoded as an enum integer. See the DataType
49639//         proto for more details.
49640//   [13]: Number of dimensions of the tensor (ndims).
49641//   [14+]: Sizes of the dimensions.
49642//
49643//
49644// Arguments:
49645//	input: Input tensor, non-Reference type.
49646func DebugNumericSummary(scope *Scope, input tf.Output, optional ...DebugNumericSummaryAttr) (output tf.Output) {
49647	if scope.Err() != nil {
49648		return
49649	}
49650	attrs := map[string]interface{}{}
49651	for _, a := range optional {
49652		a(attrs)
49653	}
49654	opspec := tf.OpSpec{
49655		Type: "DebugNumericSummary",
49656		Input: []tf.Input{
49657			input,
49658		},
49659		Attrs: attrs,
49660	}
49661	op := scope.AddOperation(opspec)
49662	return op.Output(0)
49663}
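
// Example (hand-written sketch, not part of the generated API): attach a
// numeric summary to a tensor, muting the debug output while it stays healthy.
// Per the layout above, element [2] of the result is the NaN count and
// elements [8] and [9] are the min and max of the finite values.
func exampleDebugNumericSummary(s *Scope, t tf.Output) tf.Output {
	return DebugNumericSummary(s, t,
		DebugNumericSummaryTensorName("activations"),
		DebugNumericSummaryMuteIfHealthy(true))
}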
49664
49665// Outputs random integers from a uniform distribution.
49666//
49667// The generated values are uniform integers in the range `[minval, maxval)`.
49668// The lower bound `minval` is included in the range, while the upper bound
49669// `maxval` is excluded.
49670//
49671// The random integers are slightly biased unless `maxval - minval` is an exact
49672// power of two.  The bias is small for values of `maxval - minval` significantly
49673// smaller than the range of the output (either `2^32` or `2^64`).
49674//
49675// Arguments:
49676//	resource: The handle of the resource variable that stores the state of the RNG.
49677//	algorithm: The RNG algorithm.
49678//	shape: The shape of the output tensor.
49679//	minval: Minimum value (inclusive, scalar).
49680//	maxval: Maximum value (exclusive, scalar).
49681//
49682// Returns Random values with specified shape.
49683func StatefulUniformInt(scope *Scope, resource tf.Output, algorithm tf.Output, shape tf.Output, minval tf.Output, maxval tf.Output) (output tf.Output) {
49684	if scope.Err() != nil {
49685		return
49686	}
49687	opspec := tf.OpSpec{
49688		Type: "StatefulUniformInt",
49689		Input: []tf.Input{
49690			resource, algorithm, shape, minval, maxval,
49691		},
49692	}
49693	op := scope.AddOperation(opspec)
49694	return op.Output(0)
49695}
49696
49697// SerializeManySparseAttr is an optional argument to SerializeManySparse.
49698type SerializeManySparseAttr func(optionalAttr)
49699
49700// SerializeManySparseOutType sets the optional out_type attribute to value.
49701//
49702// value: The `dtype` to use for serialization; the supported types are `string`
49703// (default) and `variant`.
49704// If not specified, defaults to DT_STRING
49705func SerializeManySparseOutType(value tf.DataType) SerializeManySparseAttr {
49706	return func(m optionalAttr) {
49707		m["out_type"] = value
49708	}
49709}
49710
49711// Serialize an `N`-minibatch `SparseTensor` into an `[N, 3]` `Tensor` object.
49712//
49713// The `SparseTensor` must have rank `R` greater than 1, and the first dimension
49714// is treated as the minibatch dimension.  Elements of the `SparseTensor`
49715// must be sorted in increasing order of this first dimension.  The serialized
49716// `SparseTensor` objects going into each row of `serialized_sparse` will have
49717// rank `R-1`.
49718//
49719// The minibatch size `N` is extracted from `sparse_shape[0]`.
49720//
49721// Arguments:
49722//	sparse_indices: 2-D.  The `indices` of the minibatch `SparseTensor`.
49723//	sparse_values: 1-D.  The `values` of the minibatch `SparseTensor`.
49724//	sparse_shape: 1-D.  The `shape` of the minibatch `SparseTensor`.
49725func SerializeManySparse(scope *Scope, sparse_indices tf.Output, sparse_values tf.Output, sparse_shape tf.Output, optional ...SerializeManySparseAttr) (serialized_sparse tf.Output) {
49726	if scope.Err() != nil {
49727		return
49728	}
49729	attrs := map[string]interface{}{}
49730	for _, a := range optional {
49731		a(attrs)
49732	}
49733	opspec := tf.OpSpec{
49734		Type: "SerializeManySparse",
49735		Input: []tf.Input{
49736			sparse_indices, sparse_values, sparse_shape,
49737		},
49738		Attrs: attrs,
49739	}
49740	op := scope.AddOperation(opspec)
49741	return op.Output(0)
49742}
49743
49744// TPUPartitionedOutputAttr is an optional argument to TPUPartitionedOutput.
49745type TPUPartitionedOutputAttr func(optionalAttr)
49746
49747// TPUPartitionedOutputPartitionDim sets the optional partition_dim attribute to value.
49748//
// value: An integer describing which dimension is partitioned.
49750// If not specified, defaults to 0
49751func TPUPartitionedOutputPartitionDim(value int64) TPUPartitionedOutputAttr {
49752	return func(m optionalAttr) {
49753		m["partition_dim"] = value
49754	}
49755}
49756
49757// An op that demultiplexes a tensor to be sharded by XLA to a list of partitioned
49758//
49759// outputs outside the XLA computation.
49760//
49761// Arguments:
49762//	inputs: A tensor which represents the full shape of partitioned tensors.
49763//
49764//
// Returns A list of partitioned outputs, which must all have the same shape.
49766func TPUPartitionedOutput(scope *Scope, inputs tf.Output, num_splits int64, optional ...TPUPartitionedOutputAttr) (output []tf.Output) {
49767	if scope.Err() != nil {
49768		return
49769	}
49770	attrs := map[string]interface{}{"num_splits": num_splits}
49771	for _, a := range optional {
49772		a(attrs)
49773	}
49774	opspec := tf.OpSpec{
49775		Type: "TPUPartitionedOutput",
49776		Input: []tf.Input{
49777			inputs,
49778		},
49779		Attrs: attrs,
49780	}
49781	op := scope.AddOperation(opspec)
49782	if scope.Err() != nil {
49783		return
49784	}
49785	var idx int
49786	var err error
49787	if output, idx, err = makeOutputList(op, idx, "output"); err != nil {
49788		scope.UpdateErr("TPUPartitionedOutput", err)
49789		return
49790	}
49791	return output
49792}
49793
49794// RequantizePerChannelAttr is an optional argument to RequantizePerChannel.
49795type RequantizePerChannelAttr func(optionalAttr)
49796
49797// RequantizePerChannelOutType sets the optional out_type attribute to value.
49798//
49799// value: The quantized type of output tensor that needs to be converted.
49800// If not specified, defaults to DT_QUINT8
49801func RequantizePerChannelOutType(value tf.DataType) RequantizePerChannelAttr {
49802	return func(m optionalAttr) {
49803		m["out_type"] = value
49804	}
49805}
49806
49807// Requantizes input with min and max values known per channel.
49808//
49809// Arguments:
49810//	input: The original input tensor.
49811//	input_min: The minimum value of the input tensor
49812//	input_max: The maximum value of the input tensor.
49813//	requested_output_min: The minimum value of the output tensor requested.
49814//	requested_output_max: The maximum value of the output tensor requested.
49815//
49816// Returns:
49817//	output: Output tensor.
49818//	output_min: The minimum value of the final output tensor
49819//	output_max: The maximum value of the final output tensor.
49820func RequantizePerChannel(scope *Scope, input tf.Output, input_min tf.Output, input_max tf.Output, requested_output_min tf.Output, requested_output_max tf.Output, optional ...RequantizePerChannelAttr) (output tf.Output, output_min tf.Output, output_max tf.Output) {
49821	if scope.Err() != nil {
49822		return
49823	}
49824	attrs := map[string]interface{}{}
49825	for _, a := range optional {
49826		a(attrs)
49827	}
49828	opspec := tf.OpSpec{
49829		Type: "RequantizePerChannel",
49830		Input: []tf.Input{
49831			input, input_min, input_max, requested_output_min, requested_output_max,
49832		},
49833		Attrs: attrs,
49834	}
49835	op := scope.AddOperation(opspec)
49836	return op.Output(0), op.Output(1), op.Output(2)
49837}
49838
49839// LeakyReluAttr is an optional argument to LeakyRelu.
49840type LeakyReluAttr func(optionalAttr)
49841
49842// LeakyReluAlpha sets the optional alpha attribute to value.
49843// If not specified, defaults to 0.2
49844func LeakyReluAlpha(value float32) LeakyReluAttr {
49845	return func(m optionalAttr) {
49846		m["alpha"] = value
49847	}
49848}
49849
49850// Computes rectified linear: `max(features, features * alpha)`.
49851func LeakyRelu(scope *Scope, features tf.Output, optional ...LeakyReluAttr) (activations tf.Output) {
49852	if scope.Err() != nil {
49853		return
49854	}
49855	attrs := map[string]interface{}{}
49856	for _, a := range optional {
49857		a(attrs)
49858	}
49859	opspec := tf.OpSpec{
49860		Type: "LeakyRelu",
49861		Input: []tf.Input{
49862			features,
49863		},
49864		Attrs: attrs,
49865	}
49866	op := scope.AddOperation(opspec)
49867	return op.Output(0)
49868}
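
// Example (hand-written sketch, not part of the generated API): a leaky ReLU
// with a non-default negative slope, i.e. max(x, 0.1*x) element-wise.
func exampleLeakyRelu(s *Scope, features tf.Output) tf.Output {
	return LeakyRelu(s, features, LeakyReluAlpha(0.1))
}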
49869
49870// Generates values in an interval.
49871//
// A sequence of `num` evenly-spaced values is generated beginning at `start`.
// If `num > 1`, the values in the sequence increase by
// `(stop - start) / (num - 1)`, so that the last one is exactly `stop`.
49875//
49876// For example:
49877//
49878// ```
49879// tf.linspace(10.0, 12.0, 3, name="linspace") => [ 10.0  11.0  12.0]
49880// ```
49881//
49882// Arguments:
49883//	start: 0-D tensor. First entry in the range.
49884//	stop: 0-D tensor. Last entry in the range.
49885//	num: 0-D tensor. Number of values to generate.
49886//
49887// Returns 1-D. The generated values.
49888func LinSpace(scope *Scope, start tf.Output, stop tf.Output, num tf.Output) (output tf.Output) {
49889	if scope.Err() != nil {
49890		return
49891	}
49892	opspec := tf.OpSpec{
49893		Type: "LinSpace",
49894		Input: []tf.Input{
49895			start, stop, num,
49896		},
49897	}
49898	op := scope.AddOperation(opspec)
49899	return op.Output(0)
49900}
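
// Example (hand-written sketch, not part of the generated API): reproduce the
// doc example above, generating [10.0 11.0 12.0] with a step of
// (stop - start) / (num - 1) = 1.0.
func exampleLinSpace(s *Scope) tf.Output {
	start := Const(s, float32(10.0))
	stop := Const(s, float32(12.0))
	num := Const(s, int32(3))
	return LinSpace(s, start, stop, num)
}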
49901
49902// Creates a dataset that caches elements from `input_dataset`.
49903//
49904// A CacheDataset will iterate over the input_dataset, and store tensors. If the
49905// cache already exists, the cache will be used. If the cache is inappropriate
// (e.g. cannot be opened, or contains tensors of the wrong shape / size), an
// error will be returned when the dataset is used.
49908//
49909// Arguments:
49910//
49911//	filename: A path on the filesystem where we should cache the dataset. Note: this
49912// will be a directory.
49913//
49914//
49915func CacheDataset(scope *Scope, input_dataset tf.Output, filename tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
49916	if scope.Err() != nil {
49917		return
49918	}
49919	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
49920	opspec := tf.OpSpec{
49921		Type: "CacheDataset",
49922		Input: []tf.Input{
49923			input_dataset, filename,
49924		},
49925		Attrs: attrs,
49926	}
49927	op := scope.AddOperation(opspec)
49928	return op.Output(0)
49929}
49930
49931// ThreadPoolHandleAttr is an optional argument to ThreadPoolHandle.
49932type ThreadPoolHandleAttr func(optionalAttr)
49933
49934// ThreadPoolHandleMaxIntraOpParallelism sets the optional max_intra_op_parallelism attribute to value.
49935//
49936// value: The maximum degree of parallelism to use within operations that execute on this
49937// threadpool.
49938// If not specified, defaults to 1
49939func ThreadPoolHandleMaxIntraOpParallelism(value int64) ThreadPoolHandleAttr {
49940	return func(m optionalAttr) {
49941		m["max_intra_op_parallelism"] = value
49942	}
49943}
49944
49945// ThreadPoolHandleContainer sets the optional container attribute to value.
49946// If not specified, defaults to ""
49947func ThreadPoolHandleContainer(value string) ThreadPoolHandleAttr {
49948	return func(m optionalAttr) {
49949		m["container"] = value
49950	}
49951}
49952
49953// ThreadPoolHandleSharedName sets the optional shared_name attribute to value.
49954// If not specified, defaults to ""
49955func ThreadPoolHandleSharedName(value string) ThreadPoolHandleAttr {
49956	return func(m optionalAttr) {
49957		m["shared_name"] = value
49958	}
49959}
49960
49961// Creates a dataset that uses a custom thread pool to compute `input_dataset`.
49962//
49963// Arguments:
49964//	num_threads: The number of threads in the thread pool.
49965//	display_name: A human-readable name for the threads that may be visible in some
49966// visualizations.
49968//
49969// Returns A resource that can be consumed by one or more ExperimentalThreadPoolDataset
49970// ops.
49971func ThreadPoolHandle(scope *Scope, num_threads int64, display_name string, optional ...ThreadPoolHandleAttr) (handle tf.Output) {
49972	if scope.Err() != nil {
49973		return
49974	}
49975	attrs := map[string]interface{}{"num_threads": num_threads, "display_name": display_name}
49976	for _, a := range optional {
49977		a(attrs)
49978	}
49979	opspec := tf.OpSpec{
49980		Type: "ThreadPoolHandle",
49981
49982		Attrs: attrs,
49983	}
49984	op := scope.AddOperation(opspec)
49985	return op.Output(0)
49986}
49987
49988// SparseReduceMaxSparseAttr is an optional argument to SparseReduceMaxSparse.
49989type SparseReduceMaxSparseAttr func(optionalAttr)
49990
49991// SparseReduceMaxSparseKeepDims sets the optional keep_dims attribute to value.
49992//
49993// value: If true, retain reduced dimensions with length 1.
49994// If not specified, defaults to false
49995func SparseReduceMaxSparseKeepDims(value bool) SparseReduceMaxSparseAttr {
49996	return func(m optionalAttr) {
49997		m["keep_dims"] = value
49998	}
49999}
50000
50001// Computes the max of elements across dimensions of a SparseTensor.
50002//
50003// This Op takes a SparseTensor and is the sparse counterpart to
50004// `tf.reduce_max()`.  In contrast to SparseReduceMax, this Op returns a
50005// SparseTensor.
50006//
50007// Reduces `sp_input` along the dimensions given in `reduction_axes`.  Unless
50008// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
50009// `reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained
50010// with length 1.
50011//
50012// If `reduction_axes` has no entries, all dimensions are reduced, and a tensor
// with a single element is returned.  Additionally, the axes can be negative,
// in which case they are interpreted according to Python's indexing rules.
50015//
50016// Arguments:
50017//	input_indices: 2-D.  `N x R` matrix with the indices of non-empty values in a
50018// SparseTensor, possibly not in canonical ordering.
50019//	input_values: 1-D.  `N` non-empty values corresponding to `input_indices`.
50020//	input_shape: 1-D.  Shape of the input SparseTensor.
50021//	reduction_axes: 1-D.  Length-`K` vector containing the reduction axes.
50022func SparseReduceMaxSparse(scope *Scope, input_indices tf.Output, input_values tf.Output, input_shape tf.Output, reduction_axes tf.Output, optional ...SparseReduceMaxSparseAttr) (output_indices tf.Output, output_values tf.Output, output_shape tf.Output) {
50023	if scope.Err() != nil {
50024		return
50025	}
50026	attrs := map[string]interface{}{}
50027	for _, a := range optional {
50028		a(attrs)
50029	}
50030	opspec := tf.OpSpec{
50031		Type: "SparseReduceMaxSparse",
50032		Input: []tf.Input{
50033			input_indices, input_values, input_shape, reduction_axes,
50034		},
50035		Attrs: attrs,
50036	}
50037	op := scope.AddOperation(opspec)
50038	return op.Output(0), op.Output(1), op.Output(2)
50039}
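
// Example (hand-written sketch, not part of the generated API): reduce a
// SparseTensor along axis 0 while retaining the reduced dimension with
// length 1.
func exampleSparseReduceMaxSparse(s *Scope, indices, values, shape tf.Output) (tf.Output, tf.Output, tf.Output) {
	axes := Const(s, []int32{0})
	return SparseReduceMaxSparse(s, indices, values, shape, axes,
		SparseReduceMaxSparseKeepDims(true))
}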
50040
50041// LoadTPUEmbeddingStochasticGradientDescentParametersAttr is an optional argument to LoadTPUEmbeddingStochasticGradientDescentParameters.
50042type LoadTPUEmbeddingStochasticGradientDescentParametersAttr func(optionalAttr)
50043
50044// LoadTPUEmbeddingStochasticGradientDescentParametersTableId sets the optional table_id attribute to value.
50045// If not specified, defaults to -1
50046func LoadTPUEmbeddingStochasticGradientDescentParametersTableId(value int64) LoadTPUEmbeddingStochasticGradientDescentParametersAttr {
50047	return func(m optionalAttr) {
50048		m["table_id"] = value
50049	}
50050}
50051
50052// LoadTPUEmbeddingStochasticGradientDescentParametersTableName sets the optional table_name attribute to value.
50053// If not specified, defaults to ""
50054func LoadTPUEmbeddingStochasticGradientDescentParametersTableName(value string) LoadTPUEmbeddingStochasticGradientDescentParametersAttr {
50055	return func(m optionalAttr) {
50056		m["table_name"] = value
50057	}
50058}
50059
50060// LoadTPUEmbeddingStochasticGradientDescentParametersConfig sets the optional config attribute to value.
50061// If not specified, defaults to ""
50062func LoadTPUEmbeddingStochasticGradientDescentParametersConfig(value string) LoadTPUEmbeddingStochasticGradientDescentParametersAttr {
50063	return func(m optionalAttr) {
50064		m["config"] = value
50065	}
50066}
50067
50068// Load SGD embedding parameters.
50069//
50070// An op that loads optimization parameters into HBM for embedding. Must be
50071// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
50072// embedding table configuration. For example, this op is used to install
50073// parameters that are loaded from a checkpoint before a training loop is
50074// executed.
50075//
50076// Arguments:
50077//	parameters: Value of parameters used in the stochastic gradient descent optimization algorithm.
50078//
50079//
50080//
50081// Returns the created operation.
50082func LoadTPUEmbeddingStochasticGradientDescentParameters(scope *Scope, parameters tf.Output, num_shards int64, shard_id int64, optional ...LoadTPUEmbeddingStochasticGradientDescentParametersAttr) (o *tf.Operation) {
50083	if scope.Err() != nil {
50084		return
50085	}
50086	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
50087	for _, a := range optional {
50088		a(attrs)
50089	}
50090	opspec := tf.OpSpec{
50091		Type: "LoadTPUEmbeddingStochasticGradientDescentParameters",
50092		Input: []tf.Input{
50093			parameters,
50094		},
50095		Attrs: attrs,
50096	}
50097	return scope.AddOperation(opspec)
50098}
50099
50100// FusedResizeAndPadConv2DAttr is an optional argument to FusedResizeAndPadConv2D.
50101type FusedResizeAndPadConv2DAttr func(optionalAttr)
50102
50103// FusedResizeAndPadConv2DResizeAlignCorners sets the optional resize_align_corners attribute to value.
50104//
50105// value: If true, the centers of the 4 corner pixels of the input and output tensors are
50106// aligned, preserving the values at the corner pixels. Defaults to false.
50107// If not specified, defaults to false
50108func FusedResizeAndPadConv2DResizeAlignCorners(value bool) FusedResizeAndPadConv2DAttr {
50109	return func(m optionalAttr) {
50110		m["resize_align_corners"] = value
50111	}
50112}
50113
50114// Performs a resize and padding as a preprocess during a convolution.
50115//
50116// It's often possible to do spatial transformations more efficiently as part of
50117// the packing stage of a convolution, so this op allows for an optimized
50118// implementation where these stages are fused together. This prevents the need to
50119// write out the intermediate results as whole tensors, reducing memory pressure,
50120// and we can get some latency gains by merging the transformation calculations.
50121// The data_format attribute for Conv2D isn't supported by this op, and defaults to
50122// 'NHWC' order.
50123// Internally this op uses a single per-graph scratch buffer, which means that it
50124// will block if multiple versions are being run in parallel. This is because this
50125// operator is primarily an optimization to minimize memory usage.
50126//
50127// Arguments:
50128//	input: 4-D with shape `[batch, in_height, in_width, in_channels]`.
50129//	size: A 1-D int32 Tensor of 2 elements: `new_height, new_width`.  The
50130// new size for the images.
50131//	paddings: A two-column matrix specifying the padding sizes. The number of
50132// rows must be the same as the rank of `input`.
50133//	filter: 4-D with shape
50134// `[filter_height, filter_width, in_channels, out_channels]`.
50135//
50136//	strides: 1-D of length 4.  The stride of the sliding window for each dimension
50137// of `input`. Must be in the same order as the dimension specified with format.
50138//	padding: The type of padding algorithm to use.
50139func FusedResizeAndPadConv2D(scope *Scope, input tf.Output, size tf.Output, paddings tf.Output, filter tf.Output, mode string, strides []int64, padding string, optional ...FusedResizeAndPadConv2DAttr) (output tf.Output) {
50140	if scope.Err() != nil {
50141		return
50142	}
50143	attrs := map[string]interface{}{"mode": mode, "strides": strides, "padding": padding}
50144	for _, a := range optional {
50145		a(attrs)
50146	}
50147	opspec := tf.OpSpec{
50148		Type: "FusedResizeAndPadConv2D",
50149		Input: []tf.Input{
50150			input, size, paddings, filter,
50151		},
50152		Attrs: attrs,
50153	}
50154	op := scope.AddOperation(opspec)
50155	return op.Output(0)
50156}
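
// Example (hand-written sketch, not part of the generated API): resize a batch
// of NHWC images to 224x224, mirror-pad one pixel on each spatial edge, and
// convolve with stride 1. The target size, pad amounts, mode ("REFLECT") and
// padding algorithm ("SAME") here are illustrative assumptions.
func exampleFusedResizeAndPadConv2D(s *Scope, images, filter tf.Output) tf.Output {
	size := Const(s, []int32{224, 224})
	paddings := Const(s, [][]int32{{0, 0}, {1, 1}, {1, 1}, {0, 0}})
	return FusedResizeAndPadConv2D(s, images, size, paddings, filter,
		"REFLECT", []int64{1, 1, 1, 1}, "SAME")
}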
50157
50158// Creates a dataset that zips together `input_datasets`.
50159//
50160// The elements of the resulting dataset are created by zipping corresponding
50161// elements from each of the input datasets.
50162//
50163// The size of the resulting dataset will match the size of the smallest input
50164// dataset, and no error will be raised if input datasets have different sizes.
50165//
50166// Arguments:
50167//	input_datasets: List of `N` variant Tensors representing datasets to be zipped together.
50168//
50169//
50170func ZipDataset(scope *Scope, input_datasets []tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
50171	if scope.Err() != nil {
50172		return
50173	}
50174	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
50175	opspec := tf.OpSpec{
50176		Type: "ZipDataset",
50177		Input: []tf.Input{
50178			tf.OutputList(input_datasets),
50179		},
50180		Attrs: attrs,
50181	}
50182	op := scope.AddOperation(opspec)
50183	return op.Output(0)
50184}
50185
50186// Rounds the values of a tensor to the nearest integer, element-wise.
50187//
// Rounds half to even, also known as banker's rounding. If you want to round
// according to the current system rounding mode, use std::rint.
50190func Round(scope *Scope, x tf.Output) (y tf.Output) {
50191	if scope.Err() != nil {
50192		return
50193	}
50194	opspec := tf.OpSpec{
50195		Type: "Round",
50196		Input: []tf.Input{
50197			x,
50198		},
50199	}
50200	op := scope.AddOperation(opspec)
50201	return op.Output(0)
50202}
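
// Example (hand-written sketch, not part of the generated API): banker's
// rounding breaks ties toward even, so [0.5 1.5 2.5 -0.5] rounds to
// [0 2 2 -0].
func exampleRound(s *Scope) tf.Output {
	x := Const(s, []float32{0.5, 1.5, 2.5, -0.5})
	return Round(s, x)
}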
50203
50204// Sends the named tensor to another XLA computation. Wraps the XLA Send operator
50205//
50206// documented at
50207//  https://www.tensorflow.org/performance/xla/operation_semantics#send .
50208//
50209// Arguments:
50210//	tensor: The tensor to send.
50211//	tensor_name: A string key that identifies the channel.
50212//
50213// Returns the created operation.
50214func XlaSend(scope *Scope, tensor tf.Output, tensor_name string) (o *tf.Operation) {
50215	if scope.Err() != nil {
50216		return
50217	}
50218	attrs := map[string]interface{}{"tensor_name": tensor_name}
50219	opspec := tf.OpSpec{
50220		Type: "XlaSend",
50221		Input: []tf.Input{
50222			tensor,
50223		},
50224		Attrs: attrs,
50225	}
50226	return scope.AddOperation(opspec)
50227}
50228
50229// Returns the index of a data point that should be added to the seed set.
50230//
50231// Entries in distances are assumed to be squared distances of candidate points to
50232// the already sampled centers in the seed set. The op constructs one Markov chain
50233// of the k-MC^2 algorithm and returns the index of one candidate point to be added
50234// as an additional cluster center.
50235//
50236// Arguments:
50237//	distances: Vector with squared distances to the closest previously sampled cluster center
50238// for each candidate point.
50239//	seed: Scalar. Seed for initializing the random number generator.
50240//
50241// Returns Scalar with the index of the sampled point.
50242func KMC2ChainInitialization(scope *Scope, distances tf.Output, seed tf.Output) (index tf.Output) {
50243	if scope.Err() != nil {
50244		return
50245	}
50246	opspec := tf.OpSpec{
50247		Type: "KMC2ChainInitialization",
50248		Input: []tf.Input{
50249			distances, seed,
50250		},
50251	}
50252	op := scope.AddOperation(opspec)
50253	return op.Output(0)
50254}
50255
50256// Creates a tree ensemble model and returns a handle to it.
50257//
50258// Arguments:
50259//	tree_ensemble_handle: Handle to the tree ensemble resource to be created.
50260//	stamp_token: Token to use as the initial value of the resource stamp.
50261//	tree_ensemble_serialized: Serialized proto of the tree ensemble.
50262//
50263// Returns the created operation.
50264func BoostedTreesCreateEnsemble(scope *Scope, tree_ensemble_handle tf.Output, stamp_token tf.Output, tree_ensemble_serialized tf.Output) (o *tf.Operation) {
50265	if scope.Err() != nil {
50266		return
50267	}
50268	opspec := tf.OpSpec{
50269		Type: "BoostedTreesCreateEnsemble",
50270		Input: []tf.Input{
50271			tree_ensemble_handle, stamp_token, tree_ensemble_serialized,
50272		},
50273	}
50274	return scope.AddOperation(opspec)
50275}
50276
50277// Calculates the softmax of a CSRSparseMatrix.
50278//
50279// Calculate the softmax of the innermost dimensions of a SparseMatrix.
50280//
50281// Missing values are treated as `-inf` (i.e., logits of zero probability); and
50282// the output has the same sparsity structure as the input (though missing values
50283// in the output may now be treated as having probability zero).
50284//
50285// Arguments:
50286//	logits: A CSRSparseMatrix.
50287//
50288//
50289// Returns A CSRSparseMatrix.
50290func SparseMatrixSoftmax(scope *Scope, logits tf.Output, type_ tf.DataType) (softmax tf.Output) {
50291	if scope.Err() != nil {
50292		return
50293	}
50294	attrs := map[string]interface{}{"type": type_}
50295	opspec := tf.OpSpec{
50296		Type: "SparseMatrixSoftmax",
50297		Input: []tf.Input{
50298			logits,
50299		},
50300		Attrs: attrs,
50301	}
50302	op := scope.AddOperation(opspec)
50303	return op.Output(0)
50304}
50305
50306// QrAttr is an optional argument to Qr.
50307type QrAttr func(optionalAttr)
50308
50309// QrFullMatrices sets the optional full_matrices attribute to value.
50310//
50311// value: If true, compute full-sized `q` and `r`. If false
50312// (the default), compute only the leading `P` columns of `q`.
50313// If not specified, defaults to false
50314func QrFullMatrices(value bool) QrAttr {
50315	return func(m optionalAttr) {
50316		m["full_matrices"] = value
50317	}
50318}
50319
50320// Computes the QR decompositions of one or more matrices.
50321//
50322// Computes the QR decomposition of each inner matrix in `tensor` such that
// `tensor[..., :, :] = q[..., :, :] * r[..., :, :]`
50324//
50325// Currently, the gradient for the QR decomposition is well-defined only when
50326// the first `P` columns of the inner matrix are linearly independent, where
// `P` is the minimum of `M` and `N`, the two innermost dimensions of `tensor`.
50328//
50329// ```python
50330// # a is a tensor.
50331// # q is a tensor of orthonormal matrices.
50332// # r is a tensor of upper triangular matrices.
50333// q, r = qr(a)
50334// q_full, r_full = qr(a, full_matrices=True)
50335// ```
50336//
50337// Arguments:
50338//	input: A tensor of shape `[..., M, N]` whose inner-most 2 dimensions
50339// form matrices of size `[M, N]`. Let `P` be the minimum of `M` and `N`.
50340//
50341// Returns:
50342//	q: Orthonormal basis for range of `a`. If `full_matrices` is `False` then
50343// shape is `[..., M, P]`; if `full_matrices` is `True` then shape is
50344// `[..., M, M]`.
50345//	r: Triangular factor. If `full_matrices` is `False` then shape is
50346// `[..., P, N]`. If `full_matrices` is `True` then shape is `[..., M, N]`.
50347func Qr(scope *Scope, input tf.Output, optional ...QrAttr) (q tf.Output, r tf.Output) {
50348	if scope.Err() != nil {
50349		return
50350	}
50351	attrs := map[string]interface{}{}
50352	for _, a := range optional {
50353		a(attrs)
50354	}
50355	opspec := tf.OpSpec{
50356		Type: "Qr",
50357		Input: []tf.Input{
50358			input,
50359		},
50360		Attrs: attrs,
50361	}
50362	op := scope.AddOperation(opspec)
50363	return op.Output(0), op.Output(1)
50364}
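
// Example (hand-written sketch, not part of the generated API): the Go
// equivalent of the Python snippet above. input is assumed to have shape
// [..., M, N].
func exampleQr(s *Scope, input tf.Output) (q, r, qFull, rFull tf.Output) {
	q, r = Qr(s, input)                               // q: [..., M, P], r: [..., P, N]
	qFull, rFull = Qr(s, input, QrFullMatrices(true)) // q: [..., M, M], r: [..., M, N]
	return
}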
50365
50366// Retrieve multiple values from the computation outfeed. Device ordinal is a
50367// tensor allowing dynamic outfeed.
50368//
50369// This operation will block indefinitely until data is available. Output `i`
50370// corresponds to XLA tuple element `i`.
50371//
50372// Arguments:
50373//	device_ordinal: An int scalar tensor, representing the TPU device to use. This should be -1 when
50374// the Op is running on a TPU device, and >= 0 when the Op is running on the CPU
50375// device.
50376//	dtypes: The element types of each element in `outputs`.
50377//	shapes: The shapes of each tensor in `outputs`.
50378//
50379// Returns A list of tensors that will be read from the outfeed.
50380func OutfeedDequeueTupleV2(scope *Scope, device_ordinal tf.Output, dtypes []tf.DataType, shapes []tf.Shape) (outputs []tf.Output) {
50381	if scope.Err() != nil {
50382		return
50383	}
50384	attrs := map[string]interface{}{"dtypes": dtypes, "shapes": shapes}
50385	opspec := tf.OpSpec{
50386		Type: "OutfeedDequeueTupleV2",
50387		Input: []tf.Input{
50388			device_ordinal,
50389		},
50390		Attrs: attrs,
50391	}
50392	op := scope.AddOperation(opspec)
50393	if scope.Err() != nil {
50394		return
50395	}
50396	var idx int
50397	var err error
50398	if outputs, idx, err = makeOutputList(op, idx, "outputs"); err != nil {
50399		scope.UpdateErr("OutfeedDequeueTupleV2", err)
50400		return
50401	}
50402	return outputs
50403}
50404
50405// Replica ID.
50406func XlaReplicaId(scope *Scope) (id tf.Output) {
50407	if scope.Err() != nil {
50408		return
50409	}
50410	opspec := tf.OpSpec{
50411		Type: "XlaReplicaId",
50412	}
50413	op := scope.AddOperation(opspec)
50414	return op.Output(0)
50415}
50416
50417// Returns conj(x - y)(x - y) element-wise.
50418//
50419// *NOTE*: `SquaredDifference` supports broadcasting. More about broadcasting
50420// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
50421func SquaredDifference(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
50422	if scope.Err() != nil {
50423		return
50424	}
50425	opspec := tf.OpSpec{
50426		Type: "SquaredDifference",
50427		Input: []tf.Input{
50428			x, y,
50429		},
50430	}
50431	op := scope.AddOperation(opspec)
50432	return op.Output(0)
50433}
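
// Example (hand-written sketch, not part of the generated API): with
// broadcasting, a scalar y is compared against every element of x, so
// [1 2 3] vs 2 yields [1 0 1].
func exampleSquaredDifference(s *Scope) tf.Output {
	x := Const(s, []float32{1, 2, 3})
	y := Const(s, float32(2))
	return SquaredDifference(s, x, y)
}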
50434
50435// Makes a copy of `x`.
50436//
50437// Arguments:
50438//	x: The source tensor of type `T`.
50439//
// Returns y: A `Tensor` of type `T`. A copy of `x` that is guaranteed not to
// be an alias of `x`.
50442func DeepCopy(scope *Scope, x tf.Output) (y tf.Output) {
50443	if scope.Err() != nil {
50444		return
50445	}
50446	opspec := tf.OpSpec{
50447		Type: "DeepCopy",
50448		Input: []tf.Input{
50449			x,
50450		},
50451	}
50452	op := scope.AddOperation(opspec)
50453	return op.Output(0)
50454}
50455
50456// Retrieves a single tensor from the computation outfeed. Device ordinal is a
50457// tensor allowing dynamic outfeed.
50458//
50459// This operation will block indefinitely until data is available.
50460//
50461// Arguments:
50462//	device_ordinal: An int scalar tensor, representing the TPU device to use. This should be -1 when
50463// the Op is running on a TPU device, and >= 0 when the Op is running on the CPU
50464// device.
50465//	dtype: The type of elements in the tensor.
50466//	shape: The shape of the tensor.
50467//
50468// Returns A tensor that will be read from the device outfeed.
50469func OutfeedDequeueV2(scope *Scope, device_ordinal tf.Output, dtype tf.DataType, shape tf.Shape) (output tf.Output) {
50470	if scope.Err() != nil {
50471		return
50472	}
50473	attrs := map[string]interface{}{"dtype": dtype, "shape": shape}
50474	opspec := tf.OpSpec{
50475		Type: "OutfeedDequeueV2",
50476		Input: []tf.Input{
50477			device_ordinal,
50478		},
50479		Attrs: attrs,
50480	}
50481	op := scope.AddOperation(opspec)
50482	return op.Output(0)
50483}
50484
50485// CollectiveGatherV2Attr is an optional argument to CollectiveGatherV2.
50486type CollectiveGatherV2Attr func(optionalAttr)
50487
50488// CollectiveGatherV2CommunicationHint sets the optional communication_hint attribute to value.
50489// If not specified, defaults to "auto"
50490func CollectiveGatherV2CommunicationHint(value string) CollectiveGatherV2Attr {
50491	return func(m optionalAttr) {
50492		m["communication_hint"] = value
50493	}
50494}
50495
50496// CollectiveGatherV2TimeoutSeconds sets the optional timeout_seconds attribute to value.
50497// If not specified, defaults to 0
50498func CollectiveGatherV2TimeoutSeconds(value float32) CollectiveGatherV2Attr {
50499	return func(m optionalAttr) {
50500		m["timeout_seconds"] = value
50501	}
50502}
50503
50504// Mutually accumulates multiple tensors of identical type and shape.
50505func CollectiveGatherV2(scope *Scope, input tf.Output, group_size tf.Output, group_key tf.Output, instance_key tf.Output, ordering_token []tf.Output, optional ...CollectiveGatherV2Attr) (data tf.Output) {
50506	if scope.Err() != nil {
50507		return
50508	}
50509	attrs := map[string]interface{}{}
50510	for _, a := range optional {
50511		a(attrs)
50512	}
50513	opspec := tf.OpSpec{
50514		Type: "CollectiveGatherV2",
50515		Input: []tf.Input{
50516			input, group_size, group_key, instance_key, tf.OutputList(ordering_token),
50517		},
50518		Attrs: attrs,
50519	}
50520	op := scope.AddOperation(opspec)
50521	return op.Output(0)
50522}
50523
50524// RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebugAttr is an optional argument to RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebug.
50525type RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebugAttr func(optionalAttr)
50526
50527// RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebugTableId sets the optional table_id attribute to value.
50528// If not specified, defaults to -1
50529func RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebugTableId(value int64) RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebugAttr {
50530	return func(m optionalAttr) {
50531		m["table_id"] = value
50532	}
50533}
50534
50535// RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebugTableName sets the optional table_name attribute to value.
50536// If not specified, defaults to ""
50537func RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebugTableName(value string) RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebugAttr {
50538	return func(m optionalAttr) {
50539		m["table_name"] = value
50540	}
50541}
50542
50543// RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebugConfig sets the optional config attribute to value.
50544// If not specified, defaults to ""
50545func RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebugConfig(value string) RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebugAttr {
50546	return func(m optionalAttr) {
50547		m["config"] = value
50548	}
50549}
50550
50551// Retrieve Adadelta embedding parameters with debug support.
50552//
50553// An op that retrieves optimization parameters from embedding to host
50554// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
50555// the correct embedding table configuration. For example, this op is
50556// used to retrieve updated parameters before saving a checkpoint.
50557//
50558// Returns:
50559//	parameters: Parameter parameters updated by the Adadelta optimization algorithm.
50560//	accumulators: Parameter accumulators updated by the Adadelta optimization algorithm.
50561//	updates: Parameter updates updated by the Adadelta optimization algorithm.
50562//	gradient_accumulators: Parameter gradient_accumulators updated by the Adadelta optimization algorithm.
50563func RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebug(scope *Scope, num_shards int64, shard_id int64, optional ...RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebugAttr) (parameters tf.Output, accumulators tf.Output, updates tf.Output, gradient_accumulators tf.Output) {
50564	if scope.Err() != nil {
50565		return
50566	}
50567	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
50568	for _, a := range optional {
50569		a(attrs)
50570	}
50571	opspec := tf.OpSpec{
50572		Type: "RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebug",
50573
50574		Attrs: attrs,
50575	}
50576	op := scope.AddOperation(opspec)
50577	return op.Output(0), op.Output(1), op.Output(2), op.Output(3)
50578}
50579
50580// RetrieveTPUEmbeddingFrequencyEstimatorParametersAttr is an optional argument to RetrieveTPUEmbeddingFrequencyEstimatorParameters.
50581type RetrieveTPUEmbeddingFrequencyEstimatorParametersAttr func(optionalAttr)
50582
50583// RetrieveTPUEmbeddingFrequencyEstimatorParametersTableId sets the optional table_id attribute to value.
50584// If not specified, defaults to -1
50585func RetrieveTPUEmbeddingFrequencyEstimatorParametersTableId(value int64) RetrieveTPUEmbeddingFrequencyEstimatorParametersAttr {
50586	return func(m optionalAttr) {
50587		m["table_id"] = value
50588	}
50589}
50590
50591// RetrieveTPUEmbeddingFrequencyEstimatorParametersTableName sets the optional table_name attribute to value.
50592// If not specified, defaults to ""
50593func RetrieveTPUEmbeddingFrequencyEstimatorParametersTableName(value string) RetrieveTPUEmbeddingFrequencyEstimatorParametersAttr {
50594	return func(m optionalAttr) {
50595		m["table_name"] = value
50596	}
50597}
50598
50599// RetrieveTPUEmbeddingFrequencyEstimatorParametersConfig sets the optional config attribute to value.
50600// If not specified, defaults to ""
50601func RetrieveTPUEmbeddingFrequencyEstimatorParametersConfig(value string) RetrieveTPUEmbeddingFrequencyEstimatorParametersAttr {
50602	return func(m optionalAttr) {
50603		m["config"] = value
50604	}
50605}
50606
50607// Retrieve frequency estimator embedding parameters.
50608//
50609// An op that retrieves optimization parameters from embedding to host
50610// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
50611// the correct embedding table configuration. For example, this op is
50612// used to retrieve updated parameters before saving a checkpoint.
50613//
50614// Returns:
50615//	parameters: Parameter parameters updated by the frequency estimator optimization algorithm.
50616//	last_hit_step: Parameter last_hit_step updated by the frequency estimator optimization
50617// algorithm.
50618func RetrieveTPUEmbeddingFrequencyEstimatorParameters(scope *Scope, num_shards int64, shard_id int64, optional ...RetrieveTPUEmbeddingFrequencyEstimatorParametersAttr) (parameters tf.Output, last_hit_step tf.Output) {
50619	if scope.Err() != nil {
50620		return
50621	}
50622	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
50623	for _, a := range optional {
50624		a(attrs)
50625	}
50626	opspec := tf.OpSpec{
50627		Type: "RetrieveTPUEmbeddingFrequencyEstimatorParameters",
50628
50629		Attrs: attrs,
50630	}
50631	op := scope.AddOperation(opspec)
50632	return op.Output(0), op.Output(1)
50633}
50634
50635// CropAndResizeGradImageAttr is an optional argument to CropAndResizeGradImage.
50636type CropAndResizeGradImageAttr func(optionalAttr)
50637
50638// CropAndResizeGradImageMethod sets the optional method attribute to value.
50639//
50640// value: A string specifying the interpolation method. Only 'bilinear' is
50641// supported for now.
50642// If not specified, defaults to "bilinear"
50643func CropAndResizeGradImageMethod(value string) CropAndResizeGradImageAttr {
50644	return func(m optionalAttr) {
50645		m["method"] = value
50646	}
50647}
50648
50649// Computes the gradient of the crop_and_resize op wrt the input image tensor.
50650//
50651// Arguments:
50652//	grads: A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`.
50653//	boxes: A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor
50654// specifies the coordinates of a box in the `box_ind[i]` image and is specified
50655// in normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of
50656// `y` is mapped to the image coordinate at `y * (image_height - 1)`, so as the
50657// `[0, 1]` interval of normalized image height is mapped to
// `[0, image_height - 1]` in image height coordinates. We do allow y1 > y2, in
50659// which case the sampled crop is an up-down flipped version of the original
50660// image. The width dimension is treated similarly. Normalized coordinates
50661// outside the `[0, 1]` range are allowed, in which case we use
50662// `extrapolation_value` to extrapolate the input image values.
50663//	box_ind: A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`.
50664// The value of `box_ind[i]` specifies the image that the `i`-th box refers to.
50665//	image_size: A 1-D tensor with value `[batch, image_height, image_width, depth]`
50666// containing the original image size. Both `image_height` and `image_width` need
50667// to be positive.
50668//
50669//
50670// Returns A 4-D tensor of shape `[batch, image_height, image_width, depth]`.
50671func CropAndResizeGradImage(scope *Scope, grads tf.Output, boxes tf.Output, box_ind tf.Output, image_size tf.Output, T tf.DataType, optional ...CropAndResizeGradImageAttr) (output tf.Output) {
50672	if scope.Err() != nil {
50673		return
50674	}
50675	attrs := map[string]interface{}{"T": T}
50676	for _, a := range optional {
50677		a(attrs)
50678	}
50679	opspec := tf.OpSpec{
50680		Type: "CropAndResizeGradImage",
50681		Input: []tf.Input{
50682			grads, boxes, box_ind, image_size,
50683		},
50684		Attrs: attrs,
50685	}
50686	op := scope.AddOperation(opspec)
50687	return op.Output(0)
50688}
50689
50690// OutfeedDequeueAttr is an optional argument to OutfeedDequeue.
50691type OutfeedDequeueAttr func(optionalAttr)
50692
50693// OutfeedDequeueDeviceOrdinal sets the optional device_ordinal attribute to value.
50694//
50695// value: The TPU device to use. This should be -1 when the Op
50696// is running on a TPU device, and >= 0 when the Op is running on the CPU
50697// device.
50698// If not specified, defaults to -1
50699func OutfeedDequeueDeviceOrdinal(value int64) OutfeedDequeueAttr {
50700	return func(m optionalAttr) {
50701		m["device_ordinal"] = value
50702	}
50703}
50704
50705// Retrieves a single tensor from the computation outfeed.
50706//
50707// This operation will block indefinitely until data is available.
50708//
50709// Arguments:
50710//	dtype: The type of elements in the tensor.
50711//	shape: The shape of the tensor.
50712//
50713// Returns A tensor that will be read from the device outfeed.
50714func OutfeedDequeue(scope *Scope, dtype tf.DataType, shape tf.Shape, optional ...OutfeedDequeueAttr) (output tf.Output) {
50715	if scope.Err() != nil {
50716		return
50717	}
50718	attrs := map[string]interface{}{"dtype": dtype, "shape": shape}
50719	for _, a := range optional {
50720		a(attrs)
50721	}
50722	opspec := tf.OpSpec{
50723		Type: "OutfeedDequeue",
50724
50725		Attrs: attrs,
50726	}
50727	op := scope.AddOperation(opspec)
50728	return op.Output(0)
50729}
50730
50731// AutoShardDatasetAttr is an optional argument to AutoShardDataset.
50732type AutoShardDatasetAttr func(optionalAttr)
50733
50734// AutoShardDatasetAutoShardPolicy sets the optional auto_shard_policy attribute to value.
50735// If not specified, defaults to 0
50736func AutoShardDatasetAutoShardPolicy(value int64) AutoShardDatasetAttr {
50737	return func(m optionalAttr) {
50738		m["auto_shard_policy"] = value
50739	}
50740}
50741
50742// AutoShardDatasetNumReplicas sets the optional num_replicas attribute to value.
50743// If not specified, defaults to 0
50744func AutoShardDatasetNumReplicas(value int64) AutoShardDatasetAttr {
50745	return func(m optionalAttr) {
50746		m["num_replicas"] = value
50747	}
50748}
50749
50750// Creates a dataset that shards the input dataset.
50751//
50752// Creates a dataset that shards the input dataset by num_workers, returning a
50753// sharded dataset for the index-th worker. This attempts to automatically shard
50754// a dataset by examining the Dataset graph and inserting a shard op before the
50755// inputs to a reader Dataset (e.g. CSVDataset, TFRecordDataset).
50756//
50757// This dataset will throw a NotFound error if we cannot shard the dataset
50758// automatically.
50759//
50760// Arguments:
50761//	input_dataset: A variant tensor representing the input dataset.
50762//	num_workers: A scalar representing the number of workers to distribute this dataset across.
50763//	index: A scalar representing the index of the current worker out of num_workers.
50764//
50765//
50766func AutoShardDataset(scope *Scope, input_dataset tf.Output, num_workers tf.Output, index tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...AutoShardDatasetAttr) (handle tf.Output) {
50767	if scope.Err() != nil {
50768		return
50769	}
50770	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
50771	for _, a := range optional {
50772		a(attrs)
50773	}
50774	opspec := tf.OpSpec{
50775		Type: "AutoShardDataset",
50776		Input: []tf.Input{
50777			input_dataset, num_workers, index,
50778		},
50779		Attrs: attrs,
50780	}
50781	op := scope.AddOperation(opspec)
50782	return op.Output(0)
50783}
50784
50785// DecodeCompressedAttr is an optional argument to DecodeCompressed.
50786type DecodeCompressedAttr func(optionalAttr)
50787
50788// DecodeCompressedCompressionType sets the optional compression_type attribute to value.
50789//
50790// value: A scalar containing either (i) the empty string (no
50791// compression), (ii) "ZLIB", or (iii) "GZIP".
50792// If not specified, defaults to ""
50793func DecodeCompressedCompressionType(value string) DecodeCompressedAttr {
50794	return func(m optionalAttr) {
50795		m["compression_type"] = value
50796	}
50797}
50798
50799// Decompress strings.
50800//
50801// This op decompresses each element of the `bytes` input `Tensor`, which
50802// is assumed to be compressed using the given `compression_type`.
50803//
50804// The `output` is a string `Tensor` of the same shape as `bytes`,
50805// each element containing the decompressed data from the corresponding
50806// element in `bytes`.
50807//
50808// Arguments:
50809//	bytes: A Tensor of string which is compressed.
50810//
50811// Returns A Tensor with the same shape as input `bytes`, uncompressed
50812// from bytes.
50813func DecodeCompressed(scope *Scope, bytes tf.Output, optional ...DecodeCompressedAttr) (output tf.Output) {
50814	if scope.Err() != nil {
50815		return
50816	}
50817	attrs := map[string]interface{}{}
50818	for _, a := range optional {
50819		a(attrs)
50820	}
50821	opspec := tf.OpSpec{
50822		Type: "DecodeCompressed",
50823		Input: []tf.Input{
50824			bytes,
50825		},
50826		Attrs: attrs,
50827	}
50828	op := scope.AddOperation(opspec)
50829	return op.Output(0)
50830}
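
// Example (hand-written sketch, not part of the generated API): decompress a
// tensor of GZIP-compressed strings; with no attribute set, the input is
// assumed to be uncompressed.
func exampleDecodeCompressed(s *Scope, bytes tf.Output) tf.Output {
	return DecodeCompressed(s, bytes, DecodeCompressedCompressionType("GZIP"))
}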
50831
50832// DecodeJpegAttr is an optional argument to DecodeJpeg.
50833type DecodeJpegAttr func(optionalAttr)
50834
50835// DecodeJpegChannels sets the optional channels attribute to value.
50836//
50837// value: Number of color channels for the decoded image.
50838// If not specified, defaults to 0
50839func DecodeJpegChannels(value int64) DecodeJpegAttr {
50840	return func(m optionalAttr) {
50841		m["channels"] = value
50842	}
50843}
50844
50845// DecodeJpegRatio sets the optional ratio attribute to value.
50846//
50847// value: Downscaling ratio.
50848// If not specified, defaults to 1
50849func DecodeJpegRatio(value int64) DecodeJpegAttr {
50850	return func(m optionalAttr) {
50851		m["ratio"] = value
50852	}
50853}
50854
50855// DecodeJpegFancyUpscaling sets the optional fancy_upscaling attribute to value.
50856//
50857// value: If true use a slower but nicer upscaling of the
50858// chroma planes (yuv420/422 only).
50859// If not specified, defaults to true
50860func DecodeJpegFancyUpscaling(value bool) DecodeJpegAttr {
50861	return func(m optionalAttr) {
50862		m["fancy_upscaling"] = value
50863	}
50864}
50865
50866// DecodeJpegTryRecoverTruncated sets the optional try_recover_truncated attribute to value.
50867//
50868// value: If true try to recover an image from truncated input.
50869// If not specified, defaults to false
50870func DecodeJpegTryRecoverTruncated(value bool) DecodeJpegAttr {
50871	return func(m optionalAttr) {
50872		m["try_recover_truncated"] = value
50873	}
50874}
50875
50876// DecodeJpegAcceptableFraction sets the optional acceptable_fraction attribute to value.
50877//
50878// value: The minimum required fraction of lines before a truncated
50879// input is accepted.
50880// If not specified, defaults to 1
50881func DecodeJpegAcceptableFraction(value float32) DecodeJpegAttr {
50882	return func(m optionalAttr) {
50883		m["acceptable_fraction"] = value
50884	}
50885}
50886
50887// DecodeJpegDctMethod sets the optional dct_method attribute to value.
50888//
50889// value: string specifying a hint about the algorithm used for
50890// decompression.  Defaults to "" which maps to a system-specific
50891// default.  Currently valid values are ["INTEGER_FAST",
50892// "INTEGER_ACCURATE"].  The hint may be ignored (e.g., the internal
50893// jpeg library changes to a version that does not have that specific
50894// option.)
50895// If not specified, defaults to ""
50896func DecodeJpegDctMethod(value string) DecodeJpegAttr {
50897	return func(m optionalAttr) {
50898		m["dct_method"] = value
50899	}
50900}

// Decode a JPEG-encoded image to a uint8 tensor.
//
// The attr `channels` indicates the desired number of color channels for the
// decoded image.
//
// Accepted values are:
//
// *   0: Use the number of channels in the JPEG-encoded image.
// *   1: Output a grayscale image.
// *   3: Output an RGB image.
//
// If needed, the JPEG-encoded image is transformed to match the requested number
// of color channels.
//
// The attr `ratio` allows downscaling the image by an integer factor during
// decoding.  Allowed values are: 1, 2, 4, and 8.  This is much faster than
// downscaling the image later.
//
// This op also supports decoding PNGs and non-animated GIFs since the interface is
// the same, though it is cleaner to use `tf.io.decode_image`.
//
// Arguments:
//	contents: 0-D.  The JPEG-encoded image.
//
// Returns 3-D with shape `[height, width, channels]`.
func DecodeJpeg(scope *Scope, contents tf.Output, optional ...DecodeJpegAttr) (image tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "DecodeJpeg",
		Input: []tf.Input{
			contents,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
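
// Editor's sketch (not machine generated): wiring DecodeJpeg into a graph with
// a couple of optional attributes. The placeholder setup is an assumption for
// illustration; real callers supply their own scope and input.
func exampleDecodeJpeg() {
	s := NewScope()
	// 0-D string placeholder holding the raw JPEG bytes.
	contents := Placeholder(s.SubScope("contents"), tf.String)
	// Decode to RGB and downscale by 2 during decoding.
	image := DecodeJpeg(s, contents, DecodeJpegChannels(3), DecodeJpegRatio(2))
	_ = image
}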

// Returns the number of nonzeros of `sparse_matrix`.
//
// Arguments:
//	sparse_matrix: A CSRSparseMatrix.
//
// Returns The number of nonzeros of `sparse_matrix`.
func SparseMatrixNNZ(scope *Scope, sparse_matrix tf.Output) (nnz tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "SparseMatrixNNZ",
		Input: []tf.Input{
			sparse_matrix,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Enqueue a Tensor on the computation outfeed.
//
// Arguments:
//	input: A tensor that will be inserted into the outfeed queue.
//
// Returns the created operation.
func OutfeedEnqueue(scope *Scope, input tf.Output) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "OutfeedEnqueue",
		Input: []tf.Input{
			input,
		},
	}
	return scope.AddOperation(opspec)
}

// Checks a tensor for NaN and Inf values.
//
// When run, reports an `InvalidArgument` error if `tensor` has any values
// that are not a number (NaN) or infinity (Inf). Otherwise, passes `tensor` as-is.
//
// Arguments:
//
//	message: Prefix of the error message.
func CheckNumerics(scope *Scope, tensor tf.Output, message string) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"message": message}
	opspec := tf.OpSpec{
		Type: "CheckNumerics",
		Input: []tf.Input{
			tensor,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
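
// Editor's sketch (not machine generated): CheckNumerics passes its input
// through unchanged and fails at run time when NaN/Inf appears; the constant
// input here is an assumption for illustration.
func exampleCheckNumerics() {
	s := NewScope()
	x := Const(s, []float32{1, 2, 3})
	// Fails with InvalidArgument at run time if x ever contains NaN or Inf.
	checked := CheckNumerics(s, x, "x contains non-finite values")
	_ = checked
}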

// Broadcast an array for a compatible shape.
//
// Broadcasting is the process of making arrays have compatible shapes
// for arithmetic operations. Two shapes are compatible if for each
// dimension pair they are either equal or one of them is one. When trying
// to broadcast a Tensor to a shape, it starts with the trailing dimensions,
// and works its way forward.
//
// For example,
//
// >>> x = tf.constant([1, 2, 3])
// >>> y = tf.broadcast_to(x, [3, 3])
// >>> print(y)
// tf.Tensor(
//     [[1 2 3]
//      [1 2 3]
//      [1 2 3]], shape=(3, 3), dtype=int32)
//
// In the above example, the input Tensor with shape `[3]`
// is broadcast to an output Tensor with shape `[3, 3]`.
//
// When doing broadcasted operations such as multiplying a tensor
// by a scalar, broadcasting (usually) confers some time or space
// benefit, as the broadcasted tensor is never materialized.
//
// However, `broadcast_to` does not carry with it any such benefits.
// The newly-created tensor takes the full memory of the broadcasted
// shape. (In a graph context, `broadcast_to` might be fused into a
// subsequent operation and then be optimized away, however.)
//
// Arguments:
//	input: A Tensor to broadcast.
//	shape: A 1-D `int` Tensor. The shape of the desired output.
//
// Returns A Tensor.
func BroadcastTo(scope *Scope, input tf.Output, shape tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "BroadcastTo",
		Input: []tf.Input{
			input, shape,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
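
// Editor's sketch (not machine generated): the Go equivalent of the Python
// example above, assuming constant inputs for illustration.
func exampleBroadcastTo() {
	s := NewScope()
	x := Const(s.SubScope("x"), []int32{1, 2, 3})
	shape := Const(s.SubScope("shape"), []int32{3, 3})
	// y evaluates to [[1 2 3] [1 2 3] [1 2 3]].
	y := BroadcastTo(s, x, shape)
	_ = y
}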

// Make all elements in the non-Batch dimension unique, but "close" to
//
// their initial value. Never returns a sub-normal number. Never returns
// zero. The sign of each input element is always identical to the sign
// of the corresponding output element. Behavior for infinite elements is
// undefined. Behavior for subnormal elements is undefined.
func MakeUnique(scope *Scope, input tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "MakeUnique",
		Input: []tf.Input{
			input,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// CropAndResizeAttr is an optional argument to CropAndResize.
type CropAndResizeAttr func(optionalAttr)

// CropAndResizeMethod sets the optional method attribute to value.
//
// value: A string specifying the sampling method for resizing, either
// `"bilinear"` or `"nearest"`.
// If not specified, defaults to "bilinear"
func CropAndResizeMethod(value string) CropAndResizeAttr {
	return func(m optionalAttr) {
		m["method"] = value
	}
}

// CropAndResizeExtrapolationValue sets the optional extrapolation_value attribute to value.
//
// value: Value used for extrapolation, when applicable.
// If not specified, defaults to 0
func CropAndResizeExtrapolationValue(value float32) CropAndResizeAttr {
	return func(m optionalAttr) {
		m["extrapolation_value"] = value
	}
}

// Extracts crops from the input image tensor and resizes them.
//
// Extracts crops from the input image tensor and resizes them using bilinear
// sampling or nearest neighbor sampling (possibly with aspect ratio change) to a
// common output size specified by `crop_size`. This is more general than the
// `crop_to_bounding_box` op which extracts a fixed size slice from the input image
// and does not allow resizing or aspect ratio change.
//
// Returns a tensor with `crops` from the input `image` at positions defined at the
// bounding box locations in `boxes`. The cropped boxes are all resized (with
// bilinear or nearest neighbor interpolation) to a fixed
// `size = [crop_height, crop_width]`. The result is a 4-D tensor
// `[num_boxes, crop_height, crop_width, depth]`. The resizing is corner aligned.
// In particular, if `boxes = [[0, 0, 1, 1]]`, the method gives identical
// results to using `tf.image.resize_bilinear()` or
// `tf.image.resize_nearest_neighbor()` (depending on the `method` argument) with
// `align_corners=True`.
//
// Arguments:
//	image: A 4-D tensor of shape `[batch, image_height, image_width, depth]`.
// Both `image_height` and `image_width` need to be positive.
//	boxes: A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor
// specifies the coordinates of a box in the `box_ind[i]` image and is specified
// in normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of
// `y` is mapped to the image coordinate at `y * (image_height - 1)`, so the
// `[0, 1]` interval of normalized image height is mapped to
// `[0, image_height - 1]` in image height coordinates. We do allow `y1` > `y2`, in
// which case the sampled crop is an up-down flipped version of the original
// image. The width dimension is treated similarly. Normalized coordinates
// outside the `[0, 1]` range are allowed, in which case we use
// `extrapolation_value` to extrapolate the input image values.
//	box_ind: A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`.
// The value of `box_ind[i]` specifies the image that the `i`-th box refers to.
//	crop_size: A 1-D tensor of 2 elements, `size = [crop_height, crop_width]`. All
// cropped image patches are resized to this size. The aspect ratio of the image
// content is not preserved. Both `crop_height` and `crop_width` need to be
// positive.
//
// Returns A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`.
func CropAndResize(scope *Scope, image tf.Output, boxes tf.Output, box_ind tf.Output, crop_size tf.Output, optional ...CropAndResizeAttr) (crops tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "CropAndResize",
		Input: []tf.Input{
			image, boxes, box_ind, crop_size,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
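
// Editor's sketch (not machine generated): cropping one full-frame box from a
// batch of images and resizing it to 64x64. The placeholder and constants are
// assumptions for illustration.
func exampleCropAndResize() {
	s := NewScope()
	// [batch, image_height, image_width, depth] float placeholder.
	image := Placeholder(s.SubScope("image"), tf.Float)
	// One box in normalized [y1, x1, y2, x2] form, covering the whole image.
	boxes := Const(s.SubScope("boxes"), [][]float32{{0, 0, 1, 1}})
	boxInd := Const(s.SubScope("box_ind"), []int32{0})
	cropSize := Const(s.SubScope("crop_size"), []int32{64, 64})
	crops := CropAndResize(s, image, boxes, boxInd, cropSize, CropAndResizeMethod("bilinear"))
	_ = crops
}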

// DepthwiseConv2dNativeBackpropFilterAttr is an optional argument to DepthwiseConv2dNativeBackpropFilter.
type DepthwiseConv2dNativeBackpropFilterAttr func(optionalAttr)

// DepthwiseConv2dNativeBackpropFilterExplicitPaddings sets the optional explicit_paddings attribute to value.
// If not specified, defaults to []
func DepthwiseConv2dNativeBackpropFilterExplicitPaddings(value []int64) DepthwiseConv2dNativeBackpropFilterAttr {
	return func(m optionalAttr) {
		m["explicit_paddings"] = value
	}
}

// DepthwiseConv2dNativeBackpropFilterDataFormat sets the optional data_format attribute to value.
//
// value: Specify the data format of the input and output data. With the
// default format "NHWC", the data is stored in the order of:
//     [batch, height, width, channels].
// Alternatively, the format could be "NCHW", the data storage order of:
//     [batch, channels, height, width].
// If not specified, defaults to "NHWC"
func DepthwiseConv2dNativeBackpropFilterDataFormat(value string) DepthwiseConv2dNativeBackpropFilterAttr {
	return func(m optionalAttr) {
		m["data_format"] = value
	}
}

// DepthwiseConv2dNativeBackpropFilterDilations sets the optional dilations attribute to value.
//
// value: 1-D tensor of length 4.  The dilation factor for each dimension of
// `input`. If set to k > 1, there will be k-1 skipped cells between each filter
// element on that dimension. The dimension order is determined by the value of
// `data_format`, see above for details. Dilations in the batch and depth
// dimensions must be 1.
// If not specified, defaults to [1, 1, 1, 1]
func DepthwiseConv2dNativeBackpropFilterDilations(value []int64) DepthwiseConv2dNativeBackpropFilterAttr {
	return func(m optionalAttr) {
		m["dilations"] = value
	}
}

// Computes the gradients of depthwise convolution with respect to the filter.
//
// Arguments:
//	input: 4-D with shape based on `data_format`.  For example, if
// `data_format` is 'NHWC' then `input` is a 4-D `[batch, in_height,
// in_width, in_channels]` tensor.
//	filter_sizes: An integer vector representing the tensor shape of `filter`,
// where `filter` is a 4-D
// `[filter_height, filter_width, in_channels, depthwise_multiplier]` tensor.
//	out_backprop: 4-D with shape based on `data_format`.
// For example, if `data_format` is 'NHWC' then
// out_backprop shape is `[batch, out_height, out_width, out_channels]`.
// Gradients w.r.t. the output of the convolution.
//	strides: The stride of the sliding window for each dimension of the input
// of the convolution.
//	padding: The type of padding algorithm to use.
//
// Returns 4-D with shape
// `[filter_height, filter_width, in_channels, out_channels]`.  Gradient w.r.t.
// the `filter` input of the convolution.
func DepthwiseConv2dNativeBackpropFilter(scope *Scope, input tf.Output, filter_sizes tf.Output, out_backprop tf.Output, strides []int64, padding string, optional ...DepthwiseConv2dNativeBackpropFilterAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"strides": strides, "padding": padding}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "DepthwiseConv2dNativeBackpropFilter",
		Input: []tf.Input{
			input, filter_sizes, out_backprop,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// TridiagonalSolveAttr is an optional argument to TridiagonalSolve.
type TridiagonalSolveAttr func(optionalAttr)

// TridiagonalSolvePartialPivoting sets the optional partial_pivoting attribute to value.
//
// value: Whether to apply partial pivoting. Partial pivoting makes the procedure more
// stable, but slower.
// If not specified, defaults to true
func TridiagonalSolvePartialPivoting(value bool) TridiagonalSolveAttr {
	return func(m optionalAttr) {
		m["partial_pivoting"] = value
	}
}

// Solves tridiagonal systems of equations.
//
// Supports batch dimensions and multiple right-hand sides for each left-hand
// side. On CPU, the solution is computed via Gaussian elimination with or
// without partial pivoting, depending on the `partial_pivoting` attribute. On
// GPU, Nvidia's cuSPARSE library is used:
// https://docs.nvidia.com/cuda/cusparse/index.html#gtsv
// Partial pivoting is not yet supported by XLA backends.
//
// Arguments:
//	diagonals: Tensor of shape `[..., 3, M]` whose innermost 2 dimensions represent the
// tridiagonal matrices with three rows being the superdiagonal, the diagonal, and
// the subdiagonal, in that order. The last element of the superdiagonal and the
// first element of the subdiagonal are ignored.
//	rhs: Tensor of shape `[..., M, K]`, representing K right-hand sides for each
// left-hand side.
//
// Returns Tensor of shape `[..., M, K]` containing the solutions.
func TridiagonalSolve(scope *Scope, diagonals tf.Output, rhs tf.Output, optional ...TridiagonalSolveAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "TridiagonalSolve",
		Input: []tf.Input{
			diagonals, rhs,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
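
// Editor's sketch (not machine generated): solving a single 3x3 tridiagonal
// system with one right-hand side. The constant values are assumptions for
// illustration; note the padded entries in the compact diagonals format.
func exampleTridiagonalSolve() {
	s := NewScope()
	diagonals := Const(s.SubScope("diagonals"), [][]float32{
		{2, 1, 0}, // superdiagonal; the last element is ignored
		{4, 4, 4}, // main diagonal
		{0, 1, 2}, // subdiagonal; the first element is ignored
	})
	rhs := Const(s.SubScope("rhs"), [][]float32{{1}, {2}, {3}}) // shape [M, K] = [3, 1]
	x := TridiagonalSolve(s, diagonals, rhs, TridiagonalSolvePartialPivoting(true))
	_ = x
}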

// An Op to exchange data across TPU replicas.
//
// On each replica, the input is split into `split_count` blocks along
// `split_dimension` and sent to the other replicas given `group_assignment`. After
// receiving `split_count` - 1 blocks from the other replicas, we concatenate the
// blocks along `concat_dimension` as the output.
//
// For example, suppose there are 2 TPU replicas:
// replica 0 receives input: `[[A, B]]`
// replica 1 receives input: `[[C, D]]`
//
// group_assignment=`[[0, 1]]`
// concat_dimension=0
// split_dimension=1
// split_count=2
//
// replica 0's output: `[[A], [C]]`
// replica 1's output: `[[B], [D]]`
//
// Arguments:
//	input: The local input to the all-to-all exchange.
//	group_assignment: An int32 tensor with shape
// [num_groups, num_replicas_per_group]. `group_assignment[i]` represents the
// replica ids in the ith subgroup.
//	concat_dimension: The dimension number to concatenate.
//	split_dimension: The dimension number to split.
//	split_count: The number of splits; this number must equal the subgroup size
// (group_assignment.get_shape()[1]).
//
// Returns The exchanged result.
func AllToAll(scope *Scope, input tf.Output, group_assignment tf.Output, concat_dimension int64, split_dimension int64, split_count int64) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"concat_dimension": concat_dimension, "split_dimension": split_dimension, "split_count": split_count}
	opspec := tf.OpSpec{
		Type: "AllToAll",
		Input: []tf.Input{
			input, group_assignment,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// LoadTPUEmbeddingMDLAdagradLightParametersAttr is an optional argument to LoadTPUEmbeddingMDLAdagradLightParameters.
type LoadTPUEmbeddingMDLAdagradLightParametersAttr func(optionalAttr)

// LoadTPUEmbeddingMDLAdagradLightParametersTableId sets the optional table_id attribute to value.
// If not specified, defaults to -1
func LoadTPUEmbeddingMDLAdagradLightParametersTableId(value int64) LoadTPUEmbeddingMDLAdagradLightParametersAttr {
	return func(m optionalAttr) {
		m["table_id"] = value
	}
}

// LoadTPUEmbeddingMDLAdagradLightParametersTableName sets the optional table_name attribute to value.
// If not specified, defaults to ""
func LoadTPUEmbeddingMDLAdagradLightParametersTableName(value string) LoadTPUEmbeddingMDLAdagradLightParametersAttr {
	return func(m optionalAttr) {
		m["table_name"] = value
	}
}

// LoadTPUEmbeddingMDLAdagradLightParametersConfig sets the optional config attribute to value.
// If not specified, defaults to ""
func LoadTPUEmbeddingMDLAdagradLightParametersConfig(value string) LoadTPUEmbeddingMDLAdagradLightParametersAttr {
	return func(m optionalAttr) {
		m["config"] = value
	}
}

// Load MDL Adagrad Light embedding parameters.
//
// An op that loads optimization parameters into HBM for embedding. Must be
// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
// embedding table configuration. For example, this op is used to install
// parameters that are loaded from a checkpoint before a training loop is
// executed.
//
// Arguments:
//	parameters: Value of parameters used in the MDL Adagrad Light optimization algorithm.
//	accumulators: Value of accumulators used in the MDL Adagrad Light optimization algorithm.
//	weights: Value of weights used in the MDL Adagrad Light optimization algorithm.
//	benefits: Value of benefits used in the MDL Adagrad Light optimization algorithm.
//
// Returns the created operation.
func LoadTPUEmbeddingMDLAdagradLightParameters(scope *Scope, parameters tf.Output, accumulators tf.Output, weights tf.Output, benefits tf.Output, num_shards int64, shard_id int64, optional ...LoadTPUEmbeddingMDLAdagradLightParametersAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "LoadTPUEmbeddingMDLAdagradLightParameters",
		Input: []tf.Input{
			parameters, accumulators, weights, benefits,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}

// CumprodAttr is an optional argument to Cumprod.
type CumprodAttr func(optionalAttr)

// CumprodExclusive sets the optional exclusive attribute to value.
//
// value: If `True`, perform exclusive cumprod.
// If not specified, defaults to false
func CumprodExclusive(value bool) CumprodAttr {
	return func(m optionalAttr) {
		m["exclusive"] = value
	}
}

// CumprodReverse sets the optional reverse attribute to value.
//
// value: If `True`, perform the cumprod in the reverse direction.
// If not specified, defaults to false
func CumprodReverse(value bool) CumprodAttr {
	return func(m optionalAttr) {
		m["reverse"] = value
	}
}

// Compute the cumulative product of the tensor `x` along `axis`.
//
// By default, this op performs an inclusive cumprod, which means that the first
// element of the input is identical to the first element of the output:
//
// ```python
// tf.cumprod([a, b, c])  # => [a, a * b, a * b * c]
// ```
//
// By setting the `exclusive` kwarg to `True`, an exclusive cumprod is
// performed instead:
//
// ```python
// tf.cumprod([a, b, c], exclusive=True)  # => [1, a, a * b]
// ```
//
// By setting the `reverse` kwarg to `True`, the cumprod is performed in the
// opposite direction:
//
// ```python
// tf.cumprod([a, b, c], reverse=True)  # => [a * b * c, b * c, c]
// ```
//
// This is more efficient than using separate `tf.reverse` ops.
//
// The `reverse` and `exclusive` kwargs can also be combined:
//
// ```python
// tf.cumprod([a, b, c], exclusive=True, reverse=True)  # => [b * c, c, 1]
// ```
//
// Arguments:
//	x: A `Tensor`. Must be one of the following types: `float32`, `float64`,
// `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
// `complex128`, `qint8`, `quint8`, `qint32`, `half`.
//	axis: A `Tensor` of type `int32` (default: 0). Must be in the range
// `[-rank(x), rank(x))`.
func Cumprod(scope *Scope, x tf.Output, axis tf.Output, optional ...CumprodAttr) (out tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "Cumprod",
		Input: []tf.Input{
			x, axis,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
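
// Editor's sketch (not machine generated): an exclusive, reversed cumprod over
// a small constant vector, mirroring the Python examples above.
func exampleCumprod() {
	s := NewScope()
	x := Const(s.SubScope("x"), []float32{2, 3, 4})
	axis := Const(s.SubScope("axis"), int32(0))
	// Evaluates to [3*4, 4, 1] = [12, 4, 1].
	out := Cumprod(s, x, axis, CumprodExclusive(true), CumprodReverse(true))
	_ = out
}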

// LoadTPUEmbeddingStochasticGradientDescentParametersGradAccumDebugAttr is an optional argument to LoadTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug.
type LoadTPUEmbeddingStochasticGradientDescentParametersGradAccumDebugAttr func(optionalAttr)

// LoadTPUEmbeddingStochasticGradientDescentParametersGradAccumDebugTableId sets the optional table_id attribute to value.
// If not specified, defaults to -1
func LoadTPUEmbeddingStochasticGradientDescentParametersGradAccumDebugTableId(value int64) LoadTPUEmbeddingStochasticGradientDescentParametersGradAccumDebugAttr {
	return func(m optionalAttr) {
		m["table_id"] = value
	}
}

// LoadTPUEmbeddingStochasticGradientDescentParametersGradAccumDebugTableName sets the optional table_name attribute to value.
// If not specified, defaults to ""
func LoadTPUEmbeddingStochasticGradientDescentParametersGradAccumDebugTableName(value string) LoadTPUEmbeddingStochasticGradientDescentParametersGradAccumDebugAttr {
	return func(m optionalAttr) {
		m["table_name"] = value
	}
}

// LoadTPUEmbeddingStochasticGradientDescentParametersGradAccumDebugConfig sets the optional config attribute to value.
// If not specified, defaults to ""
func LoadTPUEmbeddingStochasticGradientDescentParametersGradAccumDebugConfig(value string) LoadTPUEmbeddingStochasticGradientDescentParametersGradAccumDebugAttr {
	return func(m optionalAttr) {
		m["config"] = value
	}
}

// Load SGD embedding parameters.
//
// An op that loads optimization parameters into HBM for embedding. Must be
// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
// embedding table configuration. For example, this op is used to install
// parameters that are loaded from a checkpoint before a training loop is
// executed.
//
// Arguments:
//	parameters: Value of parameters used in the stochastic gradient descent optimization algorithm.
//	gradient_accumulators: Value of gradient_accumulators used in the stochastic gradient descent optimization algorithm.
//
// Returns the created operation.
func LoadTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug(scope *Scope, parameters tf.Output, gradient_accumulators tf.Output, num_shards int64, shard_id int64, optional ...LoadTPUEmbeddingStochasticGradientDescentParametersGradAccumDebugAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "LoadTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug",
		Input: []tf.Input{
			parameters, gradient_accumulators,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}

// QuantizedDepthwiseConv2DWithBiasAndReluAndRequantizeAttr is an optional argument to QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize.
type QuantizedDepthwiseConv2DWithBiasAndReluAndRequantizeAttr func(optionalAttr)

// QuantizedDepthwiseConv2DWithBiasAndReluAndRequantizeOutType sets the optional out_type attribute to value.
//
// value: The type of the output.
// If not specified, defaults to DT_QUINT8
func QuantizedDepthwiseConv2DWithBiasAndReluAndRequantizeOutType(value tf.DataType) QuantizedDepthwiseConv2DWithBiasAndReluAndRequantizeAttr {
	return func(m optionalAttr) {
		m["out_type"] = value
	}
}

// QuantizedDepthwiseConv2DWithBiasAndReluAndRequantizeDilations sets the optional dilations attribute to value.
//
// value: List of dilation values.
// If not specified, defaults to [1, 1, 1, 1]
func QuantizedDepthwiseConv2DWithBiasAndReluAndRequantizeDilations(value []int64) QuantizedDepthwiseConv2DWithBiasAndReluAndRequantizeAttr {
	return func(m optionalAttr) {
		m["dilations"] = value
	}
}

// QuantizedDepthwiseConv2DWithBiasAndReluAndRequantizePaddingList sets the optional padding_list attribute to value.
// If not specified, defaults to []
func QuantizedDepthwiseConv2DWithBiasAndReluAndRequantizePaddingList(value []int64) QuantizedDepthwiseConv2DWithBiasAndReluAndRequantizeAttr {
	return func(m optionalAttr) {
		m["padding_list"] = value
	}
}

// Computes quantized depthwise Conv2D with Bias, Relu and Requantize.
//
// Arguments:
//	input: The original input tensor.
//	filter: The original filter tensor.
//	bias: The original bias tensor.
//	min_input: The float value that the minimum quantized input value represents.
//	max_input: The float value that the maximum quantized input value represents.
//	min_filter: The float value that the minimum quantized filter value represents.
//	max_filter: The float value that the maximum quantized filter value represents.
//	min_freezed_output: The minimum float value of the output tensor.
//	max_freezed_output: The maximum float value of the output tensor.
//	strides: List of stride values.
//
// Returns:
//	output: The output tensor.
//	min_output: The float value that the minimum quantized output value represents.
//	max_output: The float value that the maximum quantized output value represents.
func QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize(scope *Scope, input tf.Output, filter tf.Output, bias tf.Output, min_input tf.Output, max_input tf.Output, min_filter tf.Output, max_filter tf.Output, min_freezed_output tf.Output, max_freezed_output tf.Output, strides []int64, padding string, optional ...QuantizedDepthwiseConv2DWithBiasAndReluAndRequantizeAttr) (output tf.Output, min_output tf.Output, max_output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"strides": strides, "padding": padding}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize",
		Input: []tf.Input{
			input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}

// UnicodeDecodeWithOffsetsAttr is an optional argument to UnicodeDecodeWithOffsets.
type UnicodeDecodeWithOffsetsAttr func(optionalAttr)

// UnicodeDecodeWithOffsetsErrors sets the optional errors attribute to value.
//
// value: Error handling policy when there is invalid formatting found in the input.
// The value of 'strict' will cause the operation to produce an InvalidArgument
// error on any invalid input formatting. A value of 'replace' (the default) will
// cause the operation to replace any invalid formatting in the input with the
// `replacement_char` codepoint. A value of 'ignore' will cause the operation to
// skip any invalid formatting in the input and produce no corresponding output
// character.
// If not specified, defaults to "replace"
func UnicodeDecodeWithOffsetsErrors(value string) UnicodeDecodeWithOffsetsAttr {
	return func(m optionalAttr) {
		m["errors"] = value
	}
}

// UnicodeDecodeWithOffsetsReplacementChar sets the optional replacement_char attribute to value.
//
// value: The replacement character codepoint to be used in place of any invalid
// formatting in the input when `errors='replace'`. Any valid Unicode codepoint may
// be used. The default is the Unicode replacement character, 0xFFFD (U+FFFD).
// If not specified, defaults to 65533
func UnicodeDecodeWithOffsetsReplacementChar(value int64) UnicodeDecodeWithOffsetsAttr {
	return func(m optionalAttr) {
		m["replacement_char"] = value
	}
}

// UnicodeDecodeWithOffsetsReplaceControlCharacters sets the optional replace_control_characters attribute to value.
//
// value: Whether to replace the C0 control characters (00-1F) with the
// `replacement_char`. Default is false.
// If not specified, defaults to false
func UnicodeDecodeWithOffsetsReplaceControlCharacters(value bool) UnicodeDecodeWithOffsetsAttr {
	return func(m optionalAttr) {
		m["replace_control_characters"] = value
	}
}

// UnicodeDecodeWithOffsetsTsplits sets the optional Tsplits attribute to value.
// If not specified, defaults to DT_INT64
func UnicodeDecodeWithOffsetsTsplits(value tf.DataType) UnicodeDecodeWithOffsetsAttr {
	return func(m optionalAttr) {
		m["Tsplits"] = value
	}
}

// Decodes each string in `input` into a sequence of Unicode code points.
//
// The character codepoints for all strings are returned using a single vector
// `char_values`, with strings expanded to characters in row-major order.
// Similarly, the character start byte offsets are returned using a single vector
// `char_to_byte_starts`, with strings expanded in row-major order.
//
// The `row_splits` tensor indicates where the codepoints and start offsets for
// each input string begin and end within the `char_values` and
// `char_to_byte_starts` tensors.  In particular, the values for the `i`th
// string (in row-major order) are stored in the slice
// `[row_splits[i]:row_splits[i+1]]`. Thus:
//
// * `char_values[row_splits[i]+j]` is the Unicode codepoint for the `j`th
//   character in the `i`th string (in row-major order).
// * `char_to_byte_starts[row_splits[i]+j]` is the start byte offset for the `j`th
//   character in the `i`th string (in row-major order).
// * `row_splits[i+1] - row_splits[i]` is the number of characters in the `i`th
//   string (in row-major order).
//
// Arguments:
//	input: The text to be decoded. Can have any shape. Note that the output is flattened
// to a vector of char values.
//	input_encoding: Text encoding of the input strings. This is any of the encodings supported
// by ICU ucnv algorithmic converters. Examples: `"UTF-16", "US ASCII", "UTF-8"`.
//
// Returns:
//	row_splits: A 1D tensor (type `Tsplits`, default int64) containing the row splits.
//	char_values: A 1D int32 Tensor containing the decoded codepoints.
//	char_to_byte_starts: A 1D int64 Tensor containing the byte index in the input string where each
// character in `char_values` starts.
func UnicodeDecodeWithOffsets(scope *Scope, input tf.Output, input_encoding string, optional ...UnicodeDecodeWithOffsetsAttr) (row_splits tf.Output, char_values tf.Output, char_to_byte_starts tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"input_encoding": input_encoding}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "UnicodeDecodeWithOffsets",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}
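
// Editor's sketch (not machine generated): decoding a constant UTF-8 string
// into codepoints and byte offsets; the input value is an assumption for
// illustration.
func exampleUnicodeDecodeWithOffsets() {
	s := NewScope()
	input := Const(s.SubScope("input"), []string{"héllo"})
	rowSplits, charValues, byteStarts := UnicodeDecodeWithOffsets(s, input, "UTF-8",
		UnicodeDecodeWithOffsetsErrors("replace"))
	_, _, _ = rowSplits, charValues, byteStarts
}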

// Removes keys and their associated values from a table.
//
// The tensor `keys` must be of the same type as the keys of the table. Keys not
// already in the table are silently ignored.
//
// Arguments:
//	table_handle: Handle to the table.
//	keys: Any shape.  Keys of the elements to remove.
//
// Returns the created operation.
func LookupTableRemoveV2(scope *Scope, table_handle tf.Output, keys tf.Output) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "LookupTableRemoveV2",
		Input: []tf.Input{
			table_handle, keys,
		},
	}
	return scope.AddOperation(opspec)
}

// NotEqualAttr is an optional argument to NotEqual.
type NotEqualAttr func(optionalAttr)

// NotEqualIncompatibleShapeError sets the optional incompatible_shape_error attribute to value.
// If not specified, defaults to true
func NotEqualIncompatibleShapeError(value bool) NotEqualAttr {
	return func(m optionalAttr) {
		m["incompatible_shape_error"] = value
	}
}

// Returns the truth value of (x != y) element-wise.
//
// *NOTE*: `NotEqual` supports broadcasting. More about broadcasting
// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
func NotEqual(scope *Scope, x tf.Output, y tf.Output, optional ...NotEqualAttr) (z tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "NotEqual",
		Input: []tf.Input{
			x, y,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
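
// Editor's sketch (not machine generated): NotEqual broadcasting a scalar
// against a vector; the constants are assumptions for illustration.
func exampleNotEqual() {
	s := NewScope()
	x := Const(s.SubScope("x"), []int32{1, 2, 3})
	y := Const(s.SubScope("y"), int32(2))
	// Evaluates to [true, false, true].
	z := NotEqual(s, x, y)
	_ = z
}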

// RetrieveTPUEmbeddingAdagradParametersAttr is an optional argument to RetrieveTPUEmbeddingAdagradParameters.
type RetrieveTPUEmbeddingAdagradParametersAttr func(optionalAttr)

// RetrieveTPUEmbeddingAdagradParametersTableId sets the optional table_id attribute to value.
// If not specified, defaults to -1
func RetrieveTPUEmbeddingAdagradParametersTableId(value int64) RetrieveTPUEmbeddingAdagradParametersAttr {
	return func(m optionalAttr) {
		m["table_id"] = value
	}
}

// RetrieveTPUEmbeddingAdagradParametersTableName sets the optional table_name attribute to value.
// If not specified, defaults to ""
func RetrieveTPUEmbeddingAdagradParametersTableName(value string) RetrieveTPUEmbeddingAdagradParametersAttr {
	return func(m optionalAttr) {
		m["table_name"] = value
	}
}

// RetrieveTPUEmbeddingAdagradParametersConfig sets the optional config attribute to value.
// If not specified, defaults to ""
func RetrieveTPUEmbeddingAdagradParametersConfig(value string) RetrieveTPUEmbeddingAdagradParametersAttr {
	return func(m optionalAttr) {
		m["config"] = value
	}
}

// Retrieve Adagrad embedding parameters.
//
// An op that retrieves optimization parameters from embedding to host
// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
// the correct embedding table configuration. For example, this op is
// used to retrieve updated parameters before saving a checkpoint.
//
// Returns:
//	parameters: Parameters updated by the Adagrad optimization algorithm.
//	accumulators: Accumulators updated by the Adagrad optimization algorithm.
func RetrieveTPUEmbeddingAdagradParameters(scope *Scope, num_shards int64, shard_id int64, optional ...RetrieveTPUEmbeddingAdagradParametersAttr) (parameters tf.Output, accumulators tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "RetrieveTPUEmbeddingAdagradParameters",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1)
}

// Concatenates quantized tensors along one dimension.
//
// Arguments:
//	concat_dim: 0-D.  The dimension along which to concatenate.  Must be in the
// range [0, rank(values)).
//	values: The `N` Tensors to concatenate. Their ranks and types must match,
// and their sizes must match in all dimensions except `concat_dim`.
//	input_mins: The minimum scalar values for each of the input tensors.
//	input_maxes: The maximum scalar values for each of the input tensors.
//
// Returns:
//	output: A `Tensor` with the concatenation of values stacked along the
// `concat_dim` dimension.  This tensor's shape matches that of `values` except
// in `concat_dim` where it has the sum of the sizes.
//	output_min: The float value that the minimum quantized output value represents.
//	output_max: The float value that the maximum quantized output value represents.
func QuantizedConcat(scope *Scope, concat_dim tf.Output, values []tf.Output, input_mins []tf.Output, input_maxes []tf.Output) (output tf.Output, output_min tf.Output, output_max tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "QuantizedConcat",
		Input: []tf.Input{
			concat_dim, tf.OutputList(values), tf.OutputList(input_mins), tf.OutputList(input_maxes),
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}

// Returns the batched diagonal part of a batched tensor.
//
// Returns a tensor with the `k[0]`-th to `k[1]`-th diagonals of the batched
// `input`.
//
// Assume `input` has `r` dimensions `[I, J, ..., L, M, N]`.
// Let `max_diag_len` be the maximum length among all diagonals to be extracted,
// `max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))`.
// Let `num_diags` be the number of diagonals to extract,
// `num_diags = k[1] - k[0] + 1`.
//
// If `num_diags == 1`, the output tensor is of rank `r - 1` with shape
// `[I, J, ..., L, max_diag_len]` and values:
//
// ```
// diagonal[i, j, ..., l, n]
//   = input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N,
//     padding_value                 ; otherwise.
// ```
// where `y = max(-k[1], 0)`, `x = max(k[1], 0)`.
//
// Otherwise, the output tensor has rank `r` with dimensions
// `[I, J, ..., L, num_diags, max_diag_len]` with values:
//
// ```
// diagonal[i, j, ..., l, m, n]
//   = input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N,
//     padding_value                 ; otherwise.
// ```
// where `d = k[1] - m`, `y = max(-d, 0)`, and `x = max(d, 0)`.
//
// The input must be at least a matrix.
//
// For example:
//
// ```
// input = np.array([[[1, 2, 3, 4],  # Input shape: (2, 3, 4)
//                    [5, 6, 7, 8],
//                    [9, 8, 7, 6]],
//                   [[5, 4, 3, 2],
//                    [1, 2, 3, 4],
//                    [5, 6, 7, 8]]])
//
// # A main diagonal from each batch.
// tf.matrix_diag_part(input) ==> [[1, 6, 7],  # Output shape: (2, 3)
//                                 [5, 2, 7]]
//
// # A superdiagonal from each batch.
// tf.matrix_diag_part(input, k = 1)
//   ==> [[2, 7, 6],  # Output shape: (2, 3)
//        [4, 3, 8]]
//
// # A tridiagonal band from each batch.
// tf.matrix_diag_part(input, k = (-1, 1))
//   ==> [[[2, 7, 6],  # Output shape: (2, 3, 3)
//         [1, 6, 7],
//         [5, 8, 0]],
//        [[4, 3, 8],
//         [5, 2, 7],
//         [1, 6, 0]]]
//
// # Padding value = 9
// tf.matrix_diag_part(input, k = (1, 3), padding_value = 9)
//   ==> [[[4, 9, 9],  # Output shape: (2, 3, 3)
//         [3, 8, 9],
//         [2, 7, 6]],
//        [[2, 9, 9],
//         [3, 4, 9],
//         [4, 3, 8]]]
// ```
//
// Arguments:
//	input: Rank `r` tensor where `r >= 2`.
//	k: Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main
// diagonal, and negative value means subdiagonals. `k` can be a single integer
// (for a single diagonal) or a pair of integers specifying the low and high ends
// of a matrix band. `k[0]` must not be larger than `k[1]`.
//	padding_value: The value to fill the area outside the specified diagonal band with.
// Default is 0.
//
// Returns The extracted diagonal(s).
func MatrixDiagPartV2(scope *Scope, input tf.Output, k tf.Output, padding_value tf.Output) (diagonal tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "MatrixDiagPartV2",
		Input: []tf.Input{
			input, k, padding_value,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
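
// Editor's sketch (not machine generated): extracting the first superdiagonal
// from a constant matrix, mirroring the k = 1 case in the comment above. The
// constants are assumptions for illustration.
func exampleMatrixDiagPartV2() {
	s := NewScope()
	input := Const(s.SubScope("input"), [][]int32{
		{1, 2, 3, 4},
		{5, 6, 7, 8},
		{9, 8, 7, 6},
	})
	k := Const(s.SubScope("k"), int32(1))                   // first superdiagonal
	paddingValue := Const(s.SubScope("padding"), int32(0))  // fill for out-of-band entries
	diagonal := MatrixDiagPartV2(s, input, k, paddingValue) // => [2, 7, 6]
	_ = diagonal
}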

// Returns x / y element-wise.
//
// *NOTE*: `Div` supports broadcasting. More about broadcasting
// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
func Div(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Div",
		Input: []tf.Input{
			x, y,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// LoadTPUEmbeddingProximalAdagradParametersGradAccumDebugAttr is an optional argument to LoadTPUEmbeddingProximalAdagradParametersGradAccumDebug.
type LoadTPUEmbeddingProximalAdagradParametersGradAccumDebugAttr func(optionalAttr)

// LoadTPUEmbeddingProximalAdagradParametersGradAccumDebugTableId sets the optional table_id attribute to value.
// If not specified, defaults to -1
func LoadTPUEmbeddingProximalAdagradParametersGradAccumDebugTableId(value int64) LoadTPUEmbeddingProximalAdagradParametersGradAccumDebugAttr {
	return func(m optionalAttr) {
		m["table_id"] = value
	}
}

// LoadTPUEmbeddingProximalAdagradParametersGradAccumDebugTableName sets the optional table_name attribute to value.
// If not specified, defaults to ""
func LoadTPUEmbeddingProximalAdagradParametersGradAccumDebugTableName(value string) LoadTPUEmbeddingProximalAdagradParametersGradAccumDebugAttr {
	return func(m optionalAttr) {
		m["table_name"] = value
	}
}

// LoadTPUEmbeddingProximalAdagradParametersGradAccumDebugConfig sets the optional config attribute to value.
// If not specified, defaults to ""
func LoadTPUEmbeddingProximalAdagradParametersGradAccumDebugConfig(value string) LoadTPUEmbeddingProximalAdagradParametersGradAccumDebugAttr {
	return func(m optionalAttr) {
		m["config"] = value
	}
}

// Load proximal Adagrad embedding parameters with debug support.
//
// An op that loads optimization parameters into HBM for embedding. Must be
// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
// embedding table configuration. For example, this op is used to install
// parameters that are loaded from a checkpoint before a training loop is
// executed.
//
// Arguments:
//	parameters: Value of parameters used in the proximal Adagrad optimization algorithm.
//	accumulators: Value of accumulators used in the proximal Adagrad optimization algorithm.
//	gradient_accumulators: Value of gradient_accumulators used in the proximal Adagrad optimization algorithm.
//
// Returns the created operation.
func LoadTPUEmbeddingProximalAdagradParametersGradAccumDebug(scope *Scope, parameters tf.Output, accumulators tf.Output, gradient_accumulators tf.Output, num_shards int64, shard_id int64, optional ...LoadTPUEmbeddingProximalAdagradParametersGradAccumDebugAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "LoadTPUEmbeddingProximalAdagradParametersGradAccumDebug",
		Input: []tf.Input{
			parameters, accumulators, gradient_accumulators,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}

// EnqueueTPUEmbeddingRaggedTensorBatchAttr is an optional argument to EnqueueTPUEmbeddingRaggedTensorBatch.
type EnqueueTPUEmbeddingRaggedTensorBatchAttr func(optionalAttr)

// EnqueueTPUEmbeddingRaggedTensorBatchDeviceOrdinal sets the optional device_ordinal attribute to value.
//
// value: The TPU device to use. Should be >= 0 and less than the number
// of TPU cores in the task on which the node is placed.
// If not specified, defaults to -1
func EnqueueTPUEmbeddingRaggedTensorBatchDeviceOrdinal(value int64) EnqueueTPUEmbeddingRaggedTensorBatchAttr {
	return func(m optionalAttr) {
		m["device_ordinal"] = value
	}
}

// EnqueueTPUEmbeddingRaggedTensorBatchCombiners sets the optional combiners attribute to value.
//
// value: A list of string scalars, one for each embedding table, that specify
// how to normalize the embedding activations after weighted summation.
// Supported combiners are 'mean', 'sum', or 'sqrtn'. It is invalid to have
// the sum of the weights be 0 for 'mean' or the sum of the squared weights be
// 0 for 'sqrtn'. If combiners isn't passed, the default is to use 'sum' for
// all tables.
// If not specified, defaults to []
func EnqueueTPUEmbeddingRaggedTensorBatchCombiners(value []string) EnqueueTPUEmbeddingRaggedTensorBatchAttr {
	return func(m optionalAttr) {
		m["combiners"] = value
	}
}

// EnqueueTPUEmbeddingRaggedTensorBatchMaxSequenceLengths sets the optional max_sequence_lengths attribute to value.
// If not specified, defaults to []
func EnqueueTPUEmbeddingRaggedTensorBatchMaxSequenceLengths(value []int64) EnqueueTPUEmbeddingRaggedTensorBatchAttr {
	return func(m optionalAttr) {
		m["max_sequence_lengths"] = value
	}
}

// EnqueueTPUEmbeddingRaggedTensorBatchNumFeatures sets the optional num_features attribute to value.
// If not specified, defaults to []
func EnqueueTPUEmbeddingRaggedTensorBatchNumFeatures(value []int64) EnqueueTPUEmbeddingRaggedTensorBatchAttr {
	return func(m optionalAttr) {
		m["num_features"] = value
	}
}

// Eases the porting of code that uses tf.nn.embedding_lookup().
//
// sample_splits[i], embedding_indices[i] and aggregation_weights[i] correspond
// to the ith feature. table_ids[i] indicates which embedding table to look up
// for the ith feature.
//
// The tensors at corresponding positions in two of the input lists,
// embedding_indices and aggregation_weights, must have the same shape, i.e. rank 1
// with dim_size() equal to the total number of lookups into the table described by
// the corresponding feature.
//
// Arguments:
//	sample_splits: A list of rank 1 Tensors specifying the break points for splitting
// embedding_indices and aggregation_weights into rows.
// It corresponds to ids.row_splits in embedding_lookup(), when ids is a
// RaggedTensor.
//	embedding_indices: A list of rank 1 Tensors, indices into the embedding tables.
// It corresponds to ids.values in embedding_lookup(), when ids is a RaggedTensor.
//	aggregation_weights: A list of rank 1 Tensors containing per training example
// aggregation weights. It corresponds to the values field of a RaggedTensor
// with the same row_splits as ids in embedding_lookup(), when ids is a
// RaggedTensor.
//	mode_override: A string input that overrides the mode specified in the
// TPUEmbeddingConfiguration. Supported values are {'unspecified', 'inference',
// 'training', 'backward_pass_only'}. When set to 'unspecified', the mode set
// in TPUEmbeddingConfiguration is used, otherwise mode_override is used.
//	table_ids: A list of integers specifying the identifier of the embedding table
// (offset of TableDescriptor in the TPUEmbeddingConfiguration) to lookup the
// corresponding input. The ith input is looked up using table_ids[i]. The size
// of the table_ids list must be equal to that of sample_splits,
// embedding_indices and aggregation_weights.
//
// Returns the created operation.
func EnqueueTPUEmbeddingRaggedTensorBatch(scope *Scope, sample_splits []tf.Output, embedding_indices []tf.Output, aggregation_weights []tf.Output, mode_override tf.Output, table_ids []int64, optional ...EnqueueTPUEmbeddingRaggedTensorBatchAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"table_ids": table_ids}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "EnqueueTPUEmbeddingRaggedTensorBatch",
		Input: []tf.Input{
			tf.OutputList(sample_splits), tf.OutputList(embedding_indices), tf.OutputList(aggregation_weights), mode_override,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}

// Returns the number of gradients aggregated in the given accumulators.
//
// Arguments:
//	handle: The handle to an accumulator.
//
// Returns The number of gradients aggregated in the given accumulator.
func ResourceAccumulatorNumAccumulated(scope *Scope, handle tf.Output) (num_accumulated tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "ResourceAccumulatorNumAccumulated",
		Input: []tf.Input{
			handle,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Connects N outputs from an N-way replicated TPU computation.
//
// This operation holds a replicated output from a `tpu.replicate()` computation subgraph.
// Each replicated output has the same shape and type as the input.
//
// For example:
// ```
// %computation = "tf.Computation"()
// %replicated_output:2 = "tf.TPUReplicatedOutput"(%computation)
// ```
// The above computation has a replicated output of two replicas.
func TPUReplicatedOutput(scope *Scope, input tf.Output, num_replicas int64) (outputs []tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_replicas": num_replicas}
	opspec := tf.OpSpec{
		Type: "TPUReplicatedOutput",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	if scope.Err() != nil {
		return
	}
	var idx int
	var err error
	if outputs, idx, err = makeOutputList(op, idx, "outputs"); err != nil {
		scope.UpdateErr("TPUReplicatedOutput", err)
		return
	}
	return outputs
}