// Copyright 2017 The TensorFlow Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// DO NOT EDIT
// This file was machine generated by github.com/tensorflow/tensorflow/tensorflow/go/genop/internal
//
// WARNING: This generation of wrapper functions for TensorFlow ops is in an
// experimental state. The generated API can change without notice.

package op

import tf "github.com/tensorflow/tensorflow/tensorflow/go"

// optionalAttr is an intentionally un-exported type to hide
// details of how optional attributes to operations are implemented.
type optionalAttr map[string]interface{}

// makeOutputList returns the tf.Output values that make up the named output
// list of op, starting at index start, along with the index just past the
// end of that list.
func makeOutputList(op *tf.Operation, start int, output string) ([]tf.Output, int, error) {
	size, err := op.OutputListSize(output)
	if err != nil {
		return nil, start, err
	}
	list := make([]tf.Output, size)
	for i := 0; i < size; i++ {
		list[i] = op.Output(start + i)
	}
	return list, start + size, nil
}

// FakeQuantWithMinMaxVarsPerChannelGradientAttr is an optional argument to FakeQuantWithMinMaxVarsPerChannelGradient.
type FakeQuantWithMinMaxVarsPerChannelGradientAttr func(optionalAttr)

// FakeQuantWithMinMaxVarsPerChannelGradientNumBits sets the optional num_bits attribute to value.
//
// value: The bitwidth of the quantization; between 2 and 16, inclusive.
// If not specified, defaults to 8
func FakeQuantWithMinMaxVarsPerChannelGradientNumBits(value int64) FakeQuantWithMinMaxVarsPerChannelGradientAttr {
	return func(m optionalAttr) {
		m["num_bits"] = value
	}
}

// FakeQuantWithMinMaxVarsPerChannelGradientNarrowRange sets the optional narrow_range attribute to value.
//
// value: Whether to quantize into 2^num_bits - 1 distinct values.
// If not specified, defaults to false
func FakeQuantWithMinMaxVarsPerChannelGradientNarrowRange(value bool) FakeQuantWithMinMaxVarsPerChannelGradientAttr {
	return func(m optionalAttr) {
		m["narrow_range"] = value
	}
}

// Compute gradients for a FakeQuantWithMinMaxVarsPerChannel operation.
//
// Arguments:
//	gradients: Backpropagated gradients above the FakeQuantWithMinMaxVarsPerChannel
// operation, shape one of: `[d]`, `[b, d]`, `[b, h, w, d]`.
//	inputs: Values passed as inputs to the FakeQuantWithMinMaxVarsPerChannel
// operation, shape same as `gradients`.
//	min, max: Quantization interval, floats of shape `[d]`.
//
// Returns:
//	backprops_wrt_input: Backpropagated gradients w.r.t. inputs, shape same as
// `inputs`: `gradients * (inputs >= min && inputs <= max)`.
//	backprop_wrt_min: Backpropagated gradients w.r.t. min parameter, shape `[d]`:
// `sum_per_d(gradients * (inputs < min))`.
//	backprop_wrt_max: Backpropagated gradients w.r.t. max parameter, shape `[d]`:
// `sum_per_d(gradients * (inputs > max))`.
func FakeQuantWithMinMaxVarsPerChannelGradient(scope *Scope, gradients tf.Output, inputs tf.Output, min tf.Output, max tf.Output, optional ...FakeQuantWithMinMaxVarsPerChannelGradientAttr) (backprops_wrt_input tf.Output, backprop_wrt_min tf.Output, backprop_wrt_max tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "FakeQuantWithMinMaxVarsPerChannelGradient",
		Input: []tf.Input{
			gradients, inputs, min, max,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}

98
99// FakeQuantWithMinMaxVarsPerChannelAttr is an optional argument to FakeQuantWithMinMaxVarsPerChannel.
100type FakeQuantWithMinMaxVarsPerChannelAttr func(optionalAttr)
101
102// FakeQuantWithMinMaxVarsPerChannelNumBits sets the optional num_bits attribute to value.
103// If not specified, defaults to 8
104func FakeQuantWithMinMaxVarsPerChannelNumBits(value int64) FakeQuantWithMinMaxVarsPerChannelAttr {
105	return func(m optionalAttr) {
106		m["num_bits"] = value
107	}
108}
109
110// FakeQuantWithMinMaxVarsPerChannelNarrowRange sets the optional narrow_range attribute to value.
111// If not specified, defaults to false
112func FakeQuantWithMinMaxVarsPerChannelNarrowRange(value bool) FakeQuantWithMinMaxVarsPerChannelAttr {
113	return func(m optionalAttr) {
114		m["narrow_range"] = value
115	}
116}
117
// Fake-quantize the 'inputs' tensor of type float and one of the shapes: `[d]`,
//
// `[b, d]`, `[b, h, w, d]` via per-channel floats `min` and `max` of shape `[d]`
// to 'outputs' tensor of same shape as `inputs`.
//
// `[min; max]` define the clamping range for the `inputs` data.
// `inputs` values are quantized into the quantization range (`[0; 2^num_bits - 1]`
// when `narrow_range` is false and `[1; 2^num_bits - 1]` when it is true) and
// then de-quantized and output as floats in `[min; max]` interval.
// `num_bits` is the bitwidth of the quantization; between 2 and 16, inclusive.
//
// This operation has a gradient and thus allows for training `min` and `max`
// values.
func FakeQuantWithMinMaxVarsPerChannel(scope *Scope, inputs tf.Output, min tf.Output, max tf.Output, optional ...FakeQuantWithMinMaxVarsPerChannelAttr) (outputs tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "FakeQuantWithMinMaxVarsPerChannel",
		Input: []tf.Input{
			inputs, min, max,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
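
// fakeQuantPerChannelExample is a hand-written usage sketch, not part of the
// machine-generated API above; all shapes and values are illustrative. It
// fake-quantizes a [2, 3] batch with per-channel ranges of shape [3], using
// the num_bits and narrow_range options described above.
func fakeQuantPerChannelExample(scope *Scope) tf.Output {
	inputs := Const(scope.SubScope("inputs"), [][]float32{{-0.5, 0.2, 0.9}, {0.1, 0.6, 1.4}})
	min := Const(scope.SubScope("min"), []float32{-1, 0, 0})
	max := Const(scope.SubScope("max"), []float32{1, 1, 2})
	return FakeQuantWithMinMaxVarsPerChannel(scope, inputs, min, max,
		FakeQuantWithMinMaxVarsPerChannelNumBits(8),
		FakeQuantWithMinMaxVarsPerChannelNarrowRange(true))
}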

// FakeQuantWithMinMaxVarsGradientAttr is an optional argument to FakeQuantWithMinMaxVarsGradient.
type FakeQuantWithMinMaxVarsGradientAttr func(optionalAttr)

// FakeQuantWithMinMaxVarsGradientNumBits sets the optional num_bits attribute to value.
//
// value: The bitwidth of the quantization; between 2 and 8, inclusive.
// If not specified, defaults to 8
func FakeQuantWithMinMaxVarsGradientNumBits(value int64) FakeQuantWithMinMaxVarsGradientAttr {
	return func(m optionalAttr) {
		m["num_bits"] = value
	}
}

// FakeQuantWithMinMaxVarsGradientNarrowRange sets the optional narrow_range attribute to value.
//
// value: Whether to quantize into 2^num_bits - 1 distinct values.
// If not specified, defaults to false
func FakeQuantWithMinMaxVarsGradientNarrowRange(value bool) FakeQuantWithMinMaxVarsGradientAttr {
	return func(m optionalAttr) {
		m["narrow_range"] = value
	}
}

// Compute gradients for a FakeQuantWithMinMaxVars operation.
//
// Arguments:
//	gradients: Backpropagated gradients above the FakeQuantWithMinMaxVars operation.
//	inputs: Values passed as inputs to the FakeQuantWithMinMaxVars operation.
//	min, max: Quantization interval, scalar floats.
//
// Returns:
//	backprops_wrt_input: Backpropagated gradients w.r.t. inputs:
// `gradients * (inputs >= min && inputs <= max)`.
//	backprop_wrt_min: Backpropagated gradients w.r.t. min parameter:
// `sum(gradients * (inputs < min))`.
//	backprop_wrt_max: Backpropagated gradients w.r.t. max parameter:
// `sum(gradients * (inputs > max))`.
func FakeQuantWithMinMaxVarsGradient(scope *Scope, gradients tf.Output, inputs tf.Output, min tf.Output, max tf.Output, optional ...FakeQuantWithMinMaxVarsGradientAttr) (backprops_wrt_input tf.Output, backprop_wrt_min tf.Output, backprop_wrt_max tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "FakeQuantWithMinMaxVarsGradient",
		Input: []tf.Input{
			gradients, inputs, min, max,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}
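
// fakeQuantGradientExample is a hand-written usage sketch, not part of the
// machine-generated API above; all values are illustrative. It routes unit
// backpropagated gradients through the fake-quant interval [-1, 1] and
// returns the three partial gradients described above.
func fakeQuantGradientExample(scope *Scope) (tf.Output, tf.Output, tf.Output) {
	gradients := Const(scope.SubScope("gradients"), []float32{1, 1, 1})
	inputs := Const(scope.SubScope("inputs"), []float32{-1.5, 0.5, 2.5})
	min := Const(scope.SubScope("min"), float32(-1))
	max := Const(scope.SubScope("max"), float32(1))
	return FakeQuantWithMinMaxVarsGradient(scope, gradients, inputs, min, max)
}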

// FakeQuantWithMinMaxArgsGradientAttr is an optional argument to FakeQuantWithMinMaxArgsGradient.
type FakeQuantWithMinMaxArgsGradientAttr func(optionalAttr)

// FakeQuantWithMinMaxArgsGradientMin sets the optional min attribute to value.
// If not specified, defaults to -6
func FakeQuantWithMinMaxArgsGradientMin(value float32) FakeQuantWithMinMaxArgsGradientAttr {
	return func(m optionalAttr) {
		m["min"] = value
	}
}

// FakeQuantWithMinMaxArgsGradientMax sets the optional max attribute to value.
// If not specified, defaults to 6
func FakeQuantWithMinMaxArgsGradientMax(value float32) FakeQuantWithMinMaxArgsGradientAttr {
	return func(m optionalAttr) {
		m["max"] = value
	}
}

// FakeQuantWithMinMaxArgsGradientNumBits sets the optional num_bits attribute to value.
// If not specified, defaults to 8
func FakeQuantWithMinMaxArgsGradientNumBits(value int64) FakeQuantWithMinMaxArgsGradientAttr {
	return func(m optionalAttr) {
		m["num_bits"] = value
	}
}

// FakeQuantWithMinMaxArgsGradientNarrowRange sets the optional narrow_range attribute to value.
// If not specified, defaults to false
func FakeQuantWithMinMaxArgsGradientNarrowRange(value bool) FakeQuantWithMinMaxArgsGradientAttr {
	return func(m optionalAttr) {
		m["narrow_range"] = value
	}
}

// Compute gradients for a FakeQuantWithMinMaxArgs operation.
//
// Arguments:
//	gradients: Backpropagated gradients above the FakeQuantWithMinMaxArgs operation.
//	inputs: Values passed as inputs to the FakeQuantWithMinMaxArgs operation.
//
// Returns Backpropagated gradients below the FakeQuantWithMinMaxArgs operation:
// `gradients * (inputs >= min && inputs <= max)`.
func FakeQuantWithMinMaxArgsGradient(scope *Scope, gradients tf.Output, inputs tf.Output, optional ...FakeQuantWithMinMaxArgsGradientAttr) (backprops tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "FakeQuantWithMinMaxArgsGradient",
		Input: []tf.Input{
			gradients, inputs,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// FakeQuantWithMinMaxArgsAttr is an optional argument to FakeQuantWithMinMaxArgs.
type FakeQuantWithMinMaxArgsAttr func(optionalAttr)

// FakeQuantWithMinMaxArgsMin sets the optional min attribute to value.
// If not specified, defaults to -6
func FakeQuantWithMinMaxArgsMin(value float32) FakeQuantWithMinMaxArgsAttr {
	return func(m optionalAttr) {
		m["min"] = value
	}
}

// FakeQuantWithMinMaxArgsMax sets the optional max attribute to value.
// If not specified, defaults to 6
func FakeQuantWithMinMaxArgsMax(value float32) FakeQuantWithMinMaxArgsAttr {
	return func(m optionalAttr) {
		m["max"] = value
	}
}

// FakeQuantWithMinMaxArgsNumBits sets the optional num_bits attribute to value.
// If not specified, defaults to 8
func FakeQuantWithMinMaxArgsNumBits(value int64) FakeQuantWithMinMaxArgsAttr {
	return func(m optionalAttr) {
		m["num_bits"] = value
	}
}

// FakeQuantWithMinMaxArgsNarrowRange sets the optional narrow_range attribute to value.
// If not specified, defaults to false
func FakeQuantWithMinMaxArgsNarrowRange(value bool) FakeQuantWithMinMaxArgsAttr {
	return func(m optionalAttr) {
		m["narrow_range"] = value
	}
}

// Fake-quantize the 'inputs' tensor of type float to an 'outputs' tensor of the same type.
//
// Attributes `[min; max]` define the clamping range for the `inputs` data.
// `inputs` values are quantized into the quantization range (`[0; 2^num_bits - 1]`
// when `narrow_range` is false and `[1; 2^num_bits - 1]` when it is true) and
// then de-quantized and output as floats in `[min; max]` interval.
// `num_bits` is the bitwidth of the quantization; between 2 and 16, inclusive.
//
// Quantization is called fake since the output is still in floating point.
func FakeQuantWithMinMaxArgs(scope *Scope, inputs tf.Output, optional ...FakeQuantWithMinMaxArgsAttr) (outputs tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "FakeQuantWithMinMaxArgs",
		Input: []tf.Input{
			inputs,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
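
// fakeQuantArgsExample is a hand-written usage sketch, not part of the
// machine-generated API above; the range and input values are illustrative.
// It clamps values to [-1, 1] and fake-quantizes them to 8 bits as described
// above.
func fakeQuantArgsExample(scope *Scope) tf.Output {
	inputs := Const(scope, []float32{-2.0, -0.5, 0.25, 3.0})
	return FakeQuantWithMinMaxArgs(scope, inputs,
		FakeQuantWithMinMaxArgsMin(-1.0),
		FakeQuantWithMinMaxArgsMax(1.0),
		FakeQuantWithMinMaxArgsNumBits(8))
}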

// Subtracts sparse `updates` from an existing tensor according to `indices`.
//
// This operation creates a new tensor by subtracting sparse `updates` from the
// passed in `tensor`.
// This operation is very similar to `tf.scatter_nd_sub`, except that the updates
// are subtracted from an existing tensor (as opposed to a variable). If the memory
// for the existing tensor cannot be re-used, a copy is made and updated.
//
// `indices` is an integer tensor containing indices into a new tensor of shape
// `shape`.  The last dimension of `indices` can be at most the rank of `shape`:
//
//     indices.shape[-1] <= shape.rank
//
// The last dimension of `indices` corresponds to indices into elements
// (if `indices.shape[-1] = shape.rank`) or slices
// (if `indices.shape[-1] < shape.rank`) along dimension `indices.shape[-1]` of
// `shape`.  `updates` is a tensor with shape
//
//     indices.shape[:-1] + shape[indices.shape[-1]:]
//
// The simplest form of tensor_scatter_sub is to subtract individual elements
// from a tensor by index. For example, say we want to subtract 4 scattered
// elements from a rank-1 tensor with 8 elements.
//
// In Python, this scatter subtract operation would look like this:
//
// ```python
//     indices = tf.constant([[4], [3], [1], [7]])
//     updates = tf.constant([9, 10, 11, 12])
//     tensor = tf.ones([8], dtype=tf.int32)
//     updated = tf.tensor_scatter_sub(tensor, indices, updates)
//     with tf.Session() as sess:
//       print(sess.run(updated))
// ```
//
// The resulting tensor would look like this:
//
//     [1, -10, 1, -9, -8, 1, 1, -11]
//
// We can also subtract entire slices of a higher rank tensor all at once. For
// example, we can subtract two slices in the first dimension of a rank-3
// tensor with two matrices of update values.
//
// In Python, this scatter subtract operation would look like this:
//
// ```python
//     indices = tf.constant([[0], [2]])
//     updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6],
//                             [7, 7, 7, 7], [8, 8, 8, 8]],
//                            [[5, 5, 5, 5], [6, 6, 6, 6],
//                             [7, 7, 7, 7], [8, 8, 8, 8]]])
//     tensor = tf.ones([4, 4, 4])
//     updated = tf.tensor_scatter_sub(tensor, indices, updates)
//     with tf.Session() as sess:
//       print(sess.run(updated))
// ```
//
// The resulting tensor would look like this:
//
//     [[[-4, -4, -4, -4], [-5, -5, -5, -5], [-6, -6, -6, -6], [-7, -7, -7, -7]],
//      [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]],
//      [[-4, -4, -4, -4], [-5, -5, -5, -5], [-6, -6, -6, -6], [-7, -7, -7, -7]],
//      [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]]
//
// Note that on CPU, if an out of bound index is found, an error is returned.
// On GPU, if an out of bound index is found, the index is ignored.
//
// Arguments:
//	tensor: Tensor to copy/update.
//	indices: Index tensor.
//	updates: Updates to scatter into output.
//
// Returns A new tensor copied from `tensor`, with `updates` subtracted
// according to `indices`.
func TensorScatterSub(scope *Scope, tensor tf.Output, indices tf.Output, updates tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "TensorScatterSub",
		Input: []tf.Input{
			tensor, indices, updates,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
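
// tensorScatterSubExample is a hand-written Go translation of the first
// Python example above, not part of the machine-generated API. It subtracts
// four scattered values from a rank-1 tensor of ones, yielding
// [1, -10, 1, -9, -8, 1, 1, -11].
func tensorScatterSubExample(scope *Scope) tf.Output {
	tensor := Const(scope.SubScope("tensor"), []int32{1, 1, 1, 1, 1, 1, 1, 1})
	indices := Const(scope.SubScope("indices"), [][]int32{{4}, {3}, {1}, {7}})
	updates := Const(scope.SubScope("updates"), []int32{9, 10, 11, 12})
	return TensorScatterSub(scope, tensor, indices, updates)
}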

// Scatter `updates` into an existing tensor according to `indices`.
//
// This operation creates a new tensor by applying sparse `updates` to the passed
// in `tensor`.
// This operation is very similar to `tf.scatter_nd`, except that the updates are
// scattered onto an existing tensor (as opposed to a zero-tensor). If the memory
// for the existing tensor cannot be re-used, a copy is made and updated.
//
// If `indices` contains duplicates, then their updates are accumulated (summed).
//
// **WARNING**: The order in which updates are applied is nondeterministic, so the
// output will be nondeterministic if `indices` contains duplicates -- because
// of some numerical approximation issues, numbers summed in different order
// may yield different results.
//
// `indices` is an integer tensor containing indices into a new tensor of shape
// `shape`.  The last dimension of `indices` can be at most the rank of `shape`:
//
//     indices.shape[-1] <= shape.rank
//
// The last dimension of `indices` corresponds to indices into elements
// (if `indices.shape[-1] = shape.rank`) or slices
// (if `indices.shape[-1] < shape.rank`) along dimension `indices.shape[-1]` of
// `shape`.  `updates` is a tensor with shape
//
//     indices.shape[:-1] + shape[indices.shape[-1]:]
//
// The simplest form of scatter is to insert individual elements in a tensor by
// index. For example, say we want to insert 4 scattered elements in a rank-1
// tensor with 8 elements.
//
// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
// <img style="width:100%" src="https://www.tensorflow.org/images/ScatterNd1.png" alt>
// </div>
//
// In Python, this scatter operation would look like this:
//
// ```python
//     indices = tf.constant([[4], [3], [1], [7]])
//     updates = tf.constant([9, 10, 11, 12])
//     tensor = tf.ones([8], dtype=tf.int32)
//     updated = tf.tensor_scatter_update(tensor, indices, updates)
//     with tf.Session() as sess:
//       print(sess.run(updated))
// ```
//
// The resulting tensor would look like this:
//
//     [1, 11, 1, 10, 9, 1, 1, 12]
//
// We can also insert entire slices of a higher rank tensor all at once. For
// example, we can insert two slices in the first dimension of a rank-3 tensor
// with two matrices of new values.
//
// In Python, this scatter operation would look like this:
//
// ```python
//     indices = tf.constant([[0], [2]])
//     updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6],
//                             [7, 7, 7, 7], [8, 8, 8, 8]],
//                            [[5, 5, 5, 5], [6, 6, 6, 6],
//                             [7, 7, 7, 7], [8, 8, 8, 8]]])
//     tensor = tf.ones([4, 4, 4])
//     updated = tf.tensor_scatter_update(tensor, indices, updates)
//     with tf.Session() as sess:
//       print(sess.run(updated))
// ```
//
// The resulting tensor would look like this:
//
//     [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
//      [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]],
//      [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
//      [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]]
//
// Note that on CPU, if an out of bound index is found, an error is returned.
// On GPU, if an out of bound index is found, the index is ignored.
//
// Arguments:
//	tensor: Tensor to copy/update.
//	indices: Index tensor.
//	updates: Updates to scatter into output.
//
// Returns A new tensor with the given shape and updates applied according
// to the indices.
func TensorScatterUpdate(scope *Scope, tensor tf.Output, indices tf.Output, updates tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "TensorScatterUpdate",
		Input: []tf.Input{
			tensor, indices, updates,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
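
// tensorScatterUpdateExample is a hand-written Go translation of the first
// Python example above, not part of the machine-generated API. It writes four
// scattered values into a rank-1 tensor of ones, yielding
// [1, 11, 1, 10, 9, 1, 1, 12].
func tensorScatterUpdateExample(scope *Scope) tf.Output {
	tensor := Const(scope.SubScope("tensor"), []int32{1, 1, 1, 1, 1, 1, 1, 1})
	indices := Const(scope.SubScope("indices"), [][]int32{{4}, {3}, {1}, {7}})
	updates := Const(scope.SubScope("updates"), []int32{9, 10, 11, 12})
	return TensorScatterUpdate(scope, tensor, indices, updates)
}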

// Scatter `updates` into a new tensor according to `indices`.
//
// Creates a new tensor by applying sparse `updates` to individual values or
// slices within a tensor (initially zero for numeric, empty for string) of
// the given `shape` according to indices.  This operator is the inverse of the
// `tf.gather_nd` operator which extracts values or slices from a given tensor.
//
// This operation is similar to tensor_scatter_add, except that the tensor is
// zero-initialized. Calling `tf.scatter_nd(indices, values, shape)` is identical
// to `tensor_scatter_add(tf.zeros(shape, values.dtype), indices, values)`.
//
// If `indices` contains duplicates, then their updates are accumulated (summed).
//
// **WARNING**: The order in which updates are applied is nondeterministic, so the
// output will be nondeterministic if `indices` contains duplicates -- because
// of some numerical approximation issues, numbers summed in different order
// may yield different results.
//
// `indices` is an integer tensor containing indices into a new tensor of shape
// `shape`.  The last dimension of `indices` can be at most the rank of `shape`:
//
//     indices.shape[-1] <= shape.rank
//
// The last dimension of `indices` corresponds to indices into elements
// (if `indices.shape[-1] = shape.rank`) or slices
// (if `indices.shape[-1] < shape.rank`) along dimension `indices.shape[-1]` of
// `shape`.  `updates` is a tensor with shape
//
//     indices.shape[:-1] + shape[indices.shape[-1]:]
//
// The simplest form of scatter is to insert individual elements in a tensor by
// index. For example, say we want to insert 4 scattered elements in a rank-1
// tensor with 8 elements.
//
// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
// <img style="width:100%" src="https://www.tensorflow.org/images/ScatterNd1.png" alt>
// </div>
//
// In Python, this scatter operation would look like this:
//
// ```python
//     indices = tf.constant([[4], [3], [1], [7]])
//     updates = tf.constant([9, 10, 11, 12])
//     shape = tf.constant([8])
//     scatter = tf.scatter_nd(indices, updates, shape)
//     with tf.Session() as sess:
//       print(sess.run(scatter))
// ```
//
// The resulting tensor would look like this:
//
//     [0, 11, 0, 10, 9, 0, 0, 12]
//
// We can also insert entire slices of a higher rank tensor all at once. For
// example, we can insert two slices in the first dimension of a rank-3 tensor
// with two matrices of new values.
//
// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
// <img style="width:100%" src="https://www.tensorflow.org/images/ScatterNd2.png" alt>
// </div>
//
// In Python, this scatter operation would look like this:
//
// ```python
//     indices = tf.constant([[0], [2]])
//     updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6],
//                             [7, 7, 7, 7], [8, 8, 8, 8]],
//                            [[5, 5, 5, 5], [6, 6, 6, 6],
//                             [7, 7, 7, 7], [8, 8, 8, 8]]])
//     shape = tf.constant([4, 4, 4])
//     scatter = tf.scatter_nd(indices, updates, shape)
//     with tf.Session() as sess:
//       print(sess.run(scatter))
// ```
//
// The resulting tensor would look like this:
//
//     [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
//      [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
//      [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
//      [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]]
//
// Note that on CPU, if an out of bound index is found, an error is returned.
// On GPU, if an out of bound index is found, the index is ignored.
//
// Arguments:
//	indices: Index tensor.
//	updates: Updates to scatter into output.
//	shape: 1-D. The shape of the resulting tensor.
//
// Returns A new tensor with the given shape and updates applied according
// to the indices.
func ScatterNd(scope *Scope, indices tf.Output, updates tf.Output, shape tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "ScatterNd",
		Input: []tf.Input{
			indices, updates, shape,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
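
// scatterNdExample is a hand-written Go translation of the first Python
// example above, not part of the machine-generated API. It scatters four
// values into a zero-initialized vector of shape [8], yielding
// [0, 11, 0, 10, 9, 0, 0, 12].
func scatterNdExample(scope *Scope) tf.Output {
	indices := Const(scope.SubScope("indices"), [][]int32{{4}, {3}, {1}, {7}})
	updates := Const(scope.SubScope("updates"), []int32{9, 10, 11, 12})
	shape := Const(scope.SubScope("shape"), []int32{8})
	return ScatterNd(scope, indices, updates, shape)
}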

// Bitcasts a tensor from one type to another without copying data.
//
// Given a tensor `input`, this operation returns a tensor that has the same buffer
// data as `input` with datatype `type`.
//
// If the input datatype `T` is larger than the output datatype `type` then the
// shape changes from [...] to [..., sizeof(`T`)/sizeof(`type`)].
//
// If `T` is smaller than `type`, the operator requires that the rightmost
// dimension be equal to sizeof(`type`)/sizeof(`T`). The shape then goes from
// [..., sizeof(`type`)/sizeof(`T`)] to [...].
//
// *NOTE*: Bitcast is implemented as a low-level cast, so machines with different
// endian orderings will give different results.
func Bitcast(scope *Scope, input tf.Output, type_ tf.DataType) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"type": type_}
	opspec := tf.OpSpec{
		Type: "Bitcast",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
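
// bitcastExample is a hand-written usage sketch, not part of the
// machine-generated API above. Bitcasting a float32 tensor of shape [2] to
// uint8 appends a dimension of sizeof(float32)/sizeof(uint8) = 4, giving
// shape [2, 4], as described above.
func bitcastExample(scope *Scope) tf.Output {
	input := Const(scope, []float32{1.0, 2.0})
	return Bitcast(scope, input, tf.Uint8)
}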

// SpaceToDepthAttr is an optional argument to SpaceToDepth.
type SpaceToDepthAttr func(optionalAttr)

// SpaceToDepthDataFormat sets the optional data_format attribute to value.
// If not specified, defaults to "NHWC"
func SpaceToDepthDataFormat(value string) SpaceToDepthAttr {
	return func(m optionalAttr) {
		m["data_format"] = value
	}
}

// SpaceToDepth for tensors of type T.
//
// Rearranges blocks of spatial data into depth. More specifically,
// this op outputs a copy of the input tensor where values from the `height`
// and `width` dimensions are moved to the `depth` dimension.
// The attr `block_size` indicates the input block size.
//
//   * Non-overlapping blocks of size `block_size x block_size` are rearranged
//     into depth at each location.
//   * The depth of the output tensor is `block_size * block_size * input_depth`.
//   * The Y, X coordinates within each block of the input become the high order
//     component of the output channel index.
//   * The input tensor's height and width must be divisible by block_size.
//
// The `data_format` attr specifies the layout of the input and output tensors
// with the following options:
//   "NHWC": `[ batch, height, width, channels ]`
//   "NCHW": `[ batch, channels, height, width ]`
//   "NCHW_VECT_C":
//       `qint8 [ batch, channels / 4, height, width, 4 ]`
//
// It is useful to consider the operation as transforming a 6-D Tensor.
// e.g. for data_format = NHWC,
//      Each element in the input tensor can be specified via 6 coordinates,
//      ordered by decreasing memory layout significance as:
//      n,oY,bY,oX,bX,iC  (where n=batch index, oX, oY means X or Y coordinates
//                         within the output image, bX, bY means coordinates
//                         within the input block, iC means input channels).
//      The output would be a transpose to the following layout:
//      n,oY,oX,bY,bX,iC
//
// This operation is useful for resizing the activations between convolutions
// (but keeping all data), e.g. instead of pooling. It is also useful for training
// purely convolutional models.
//
// For example, given an input of shape `[1, 2, 2, 1]`, data_format = "NHWC" and
// block_size = 2:
//
// ```
// x = [[[[1], [2]],
//       [[3], [4]]]]
// ```
//
// This operation will output a tensor of shape `[1, 1, 1, 4]`:
//
// ```
// [[[[1, 2, 3, 4]]]]
// ```
//
// Here, the input has a batch of 1 and each batch element has shape `[2, 2, 1]`,
// the corresponding output will have a single element (i.e. width and height are
// both 1) and will have a depth of 4 channels (1 * block_size * block_size).
// The output element shape is `[1, 1, 4]`.
//
// For an input tensor with larger depth, here of shape `[1, 2, 2, 3]`, e.g.
//
// ```
// x = [[[[1, 2, 3], [4, 5, 6]],
//       [[7, 8, 9], [10, 11, 12]]]]
// ```
//
// This operation, for block_size of 2, will return the following tensor of shape
// `[1, 1, 1, 12]`
//
// ```
// [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]]
// ```
//
// Similarly, for the following input of shape `[1, 4, 4, 1]`, and a block size of 2:
//
// ```
// x = [[[[1],   [2],  [5],  [6]],
//       [[3],   [4],  [7],  [8]],
//       [[9],  [10], [13],  [14]],
//       [[11], [12], [15],  [16]]]]
// ```
//
// the operator will return the following tensor of shape `[1, 2, 2, 4]`:
//
// ```
// x = [[[[1, 2, 3, 4],
//        [5, 6, 7, 8]],
//       [[9, 10, 11, 12],
//        [13, 14, 15, 16]]]]
// ```
//
// Arguments:
//
//	block_size: The size of the spatial block.
func SpaceToDepth(scope *Scope, input tf.Output, block_size int64, optional ...SpaceToDepthAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"block_size": block_size}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "SpaceToDepth",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
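
// spaceToDepthExample is a hand-written Go version of the first example
// above, not part of the machine-generated API. A [1, 2, 2, 1] input with
// block_size = 2 produces a [1, 1, 1, 4] output of [[[[1, 2, 3, 4]]]].
func spaceToDepthExample(scope *Scope) tf.Output {
	x := Const(scope, [][][][]float32{{{{1}, {2}}, {{3}, {4}}}})
	return SpaceToDepth(scope, x, 2)
}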

// SpaceToBatch for 4-D tensors of type T.
//
// This is a legacy version of the more general SpaceToBatchND.
//
// Zero-pads and then rearranges (permutes) blocks of spatial data into batch.
// More specifically, this op outputs a copy of the input tensor where values from
// the `height` and `width` dimensions are moved to the `batch` dimension. After
// the zero-padding, both `height` and `width` of the input must be divisible by the
// block size.
//
// Arguments:
//	input: 4-D with shape `[batch, height, width, depth]`.
//	paddings: 2-D tensor of non-negative integers with shape `[2, 2]`. It specifies
//   the padding of the input with zeros across the spatial dimensions as follows:
//
//       paddings = [[pad_top, pad_bottom], [pad_left, pad_right]]
//
//   The effective spatial dimensions of the zero-padded input tensor will be:
//
//       height_pad = pad_top + height + pad_bottom
//       width_pad = pad_left + width + pad_right
//
// The attr `block_size` must be greater than one. It indicates the block size.
//
//   * Non-overlapping blocks of size `block_size x block_size` in the height and
//     width dimensions are rearranged into the batch dimension at each location.
//   * The batch of the output tensor is `batch * block_size * block_size`.
//   * Both height_pad and width_pad must be divisible by block_size.
//
// The shape of the output will be:
//
//     [batch*block_size*block_size, height_pad/block_size, width_pad/block_size,
//      depth]
//
// Some examples:
//
// (1) For the following input of shape `[1, 2, 2, 1]` and block_size of 2:
//
// ```
// x = [[[[1], [2]], [[3], [4]]]]
// ```
//
// The output tensor has shape `[4, 1, 1, 1]` and value:
//
// ```
// [[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
// ```
//
// (2) For the following input of shape `[1, 2, 2, 3]` and block_size of 2:
//
// ```
// x = [[[[1, 2, 3], [4, 5, 6]],
//       [[7, 8, 9], [10, 11, 12]]]]
// ```
//
// The output tensor has shape `[4, 1, 1, 3]` and value:
//
// ```
// [[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]]
// ```
//
// (3) For the following input of shape `[1, 4, 4, 1]` and block_size of 2:
//
// ```
// x = [[[[1],   [2],  [3],  [4]],
//       [[5],   [6],  [7],  [8]],
//       [[9],  [10], [11],  [12]],
//       [[13], [14], [15],  [16]]]]
// ```
//
// The output tensor has shape `[4, 2, 2, 1]` and value:
//
// ```
// x = [[[[1], [3]], [[9], [11]]],
//      [[[2], [4]], [[10], [12]]],
//      [[[5], [7]], [[13], [15]]],
//      [[[6], [8]], [[14], [16]]]]
// ```
//
// (4) For the following input of shape `[2, 2, 4, 1]` and block_size of 2:
//
// ```
// x = [[[[1],   [2],  [3],  [4]],
//       [[5],   [6],  [7],  [8]]],
//      [[[9],  [10], [11],  [12]],
//       [[13], [14], [15],  [16]]]]
// ```
//
// The output tensor has shape `[8, 1, 2, 1]` and value:
//
// ```
// x = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]],
//      [[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]]
// ```
//
// Among others, this operation is useful for reducing atrous convolution into
// regular convolution.
//
func SpaceToBatch(scope *Scope, input tf.Output, paddings tf.Output, block_size int64) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"block_size": block_size}
	opspec := tf.OpSpec{
		Type: "SpaceToBatch",
		Input: []tf.Input{
			input, paddings,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
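
// spaceToBatchExample is a hand-written Go version of example (1) above, not
// part of the machine-generated API. A [1, 2, 2, 1] input with zero padding
// and block_size = 2 produces a [4, 1, 1, 1] output.
func spaceToBatchExample(scope *Scope) tf.Output {
	x := Const(scope.SubScope("x"), [][][][]float32{{{{1}, {2}}, {{3}, {4}}}})
	paddings := Const(scope.SubScope("paddings"), [][]int32{{0, 0}, {0, 0}})
	return SpaceToBatch(scope, x, paddings, 2)
}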

// SpaceToBatch for N-D tensors of type T.
//
// This operation divides "spatial" dimensions `[1, ..., M]` of the input into a
// grid of blocks of shape `block_shape`, and interleaves these blocks with the
// "batch" dimension (0) such that in the output, the spatial dimensions
// `[1, ..., M]` correspond to the position within the grid, and the batch
// dimension combines both the position within a spatial block and the original
// batch position.  Prior to division into blocks, the spatial dimensions of the
// input are optionally zero padded according to `paddings`.  See below for a
// precise description.
//
// Arguments:
//	input: N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`,
// where spatial_shape has `M` dimensions.
//	block_shape: 1-D with shape `[M]`, all values must be >= 1.
//	paddings: 2-D with shape `[M, 2]`, all values must be >= 0.
//   `paddings[i] = [pad_start, pad_end]` specifies the padding for input dimension
//   `i + 1`, which corresponds to spatial dimension `i`.  It is required that
//   `block_shape[i]` divides `input_shape[i + 1] + pad_start + pad_end`.
//
// This operation is equivalent to the following steps:
//
// 1. Zero-pad the start and end of dimensions `[1, ..., M]` of the
//    input according to `paddings` to produce `padded` of shape `padded_shape`.
//
// 2. Reshape `padded` to `reshaped_padded` of shape:
//
//      [batch] +
//      [padded_shape[1] / block_shape[0],
//        block_shape[0],
//       ...,
//       padded_shape[M] / block_shape[M-1],
//       block_shape[M-1]] +
//      remaining_shape
//
// 3. Permute dimensions of `reshaped_padded` to produce
//    `permuted_reshaped_padded` of shape:
//
//      block_shape +
//      [batch] +
//      [padded_shape[1] / block_shape[0],
//       ...,
//       padded_shape[M] / block_shape[M-1]] +
//      remaining_shape
//
// 4. Reshape `permuted_reshaped_padded` to flatten `block_shape` into the batch
//    dimension, producing an output tensor of shape:
//
//      [batch * prod(block_shape)] +
//      [padded_shape[1] / block_shape[0],
//       ...,
//       padded_shape[M] / block_shape[M-1]] +
//      remaining_shape
//
// Some examples:
//
// (1) For the following input of shape `[1, 2, 2, 1]`, `block_shape = [2, 2]`, and
//     `paddings = [[0, 0], [0, 0]]`:
//
// ```
// x = [[[[1], [2]], [[3], [4]]]]
// ```
//
// The output tensor has shape `[4, 1, 1, 1]` and value:
//
// ```
// [[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
// ```
//
// (2) For the following input of shape `[1, 2, 2, 3]`, `block_shape = [2, 2]`, and
//     `paddings = [[0, 0], [0, 0]]`:
//
// ```
// x = [[[[1, 2, 3], [4, 5, 6]],
//       [[7, 8, 9], [10, 11, 12]]]]
// ```
//
// The output tensor has shape `[4, 1, 1, 3]` and value:
//
// ```
// [[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]]
// ```
//
// (3) For the following input of shape `[1, 4, 4, 1]`, `block_shape = [2, 2]`, and
//     `paddings = [[0, 0], [0, 0]]`:
//
// ```
// x = [[[[1],   [2],  [3],  [4]],
//       [[5],   [6],  [7],  [8]],
//       [[9],  [10], [11],  [12]],
//       [[13], [14], [15],  [16]]]]
// ```
//
// The output tensor has shape `[4, 2, 2, 1]` and value:
//
// ```
// x = [[[[1], [3]], [[9], [11]]],
//      [[[2], [4]], [[10], [12]]],
//      [[[5], [7]], [[13], [15]]],
//      [[[6], [8]], [[14], [16]]]]
// ```
//
// (4) For the following input of shape `[2, 2, 4, 1]`, `block_shape = [2, 2]`, and
//     `paddings = [[0, 0], [2, 0]]`:
//
// ```
// x = [[[[1],   [2],  [3],  [4]],
//       [[5],   [6],  [7],  [8]]],
//      [[[9],  [10], [11],  [12]],
//       [[13], [14], [15],  [16]]]]
// ```
//
// The output tensor has shape `[8, 1, 3, 1]` and value:
//
// ```
// x = [[[[0], [1], [3]]], [[[0], [9], [11]]],
//      [[[0], [2], [4]]], [[[0], [10], [12]]],
//      [[[0], [5], [7]]], [[[0], [13], [15]]],
//      [[[0], [6], [8]]], [[[0], [14], [16]]]]
// ```
//
// Among others, this operation is useful for reducing atrous convolution into
// regular convolution.
func SpaceToBatchND(scope *Scope, input tf.Output, block_shape tf.Output, paddings tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "SpaceToBatchND",
		Input: []tf.Input{
			input, block_shape, paddings,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
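
// spaceToBatchNDExample is a hand-written Go version of example (1) above,
// not part of the machine-generated API. block_shape [2, 2] and zero paddings
// turn a [1, 2, 2, 1] input into a [4, 1, 1, 1] output.
func spaceToBatchNDExample(scope *Scope) tf.Output {
	x := Const(scope.SubScope("x"), [][][][]float32{{{{1}, {2}}, {{3}, {4}}}})
	blockShape := Const(scope.SubScope("block_shape"), []int32{2, 2})
	paddings := Const(scope.SubScope("paddings"), [][]int32{{0, 0}, {0, 0}})
	return SpaceToBatchND(scope, x, blockShape, paddings)
}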

// Inserts a dimension of 1 into a tensor's shape.
//
// Given a tensor `input`, this operation inserts a dimension of 1 at the
// dimension index `axis` of `input`'s shape. The dimension index `axis` starts at
// zero; if you specify a negative number for `axis` it is counted backward from
// the end.
//
// This operation is useful if you want to add a batch dimension to a single
// element. For example, if you have a single image of shape `[height, width,
// channels]`, you can make it a batch of 1 image with `expand_dims(image, 0)`,
// which will make the shape `[1, height, width, channels]`.
//
// Other examples:
//
// ```
// # 't' is a tensor of shape [2]
// shape(expand_dims(t, 0)) ==> [1, 2]
// shape(expand_dims(t, 1)) ==> [2, 1]
// shape(expand_dims(t, -1)) ==> [2, 1]
//
// # 't2' is a tensor of shape [2, 3, 5]
// shape(expand_dims(t2, 0)) ==> [1, 2, 3, 5]
// shape(expand_dims(t2, 2)) ==> [2, 3, 1, 5]
// shape(expand_dims(t2, 3)) ==> [2, 3, 5, 1]
// ```
//
// This operation requires that:
//
// `-1-input.dims() <= dim <= input.dims()`
//
// This operation is related to `squeeze()`, which removes dimensions of
// size 1.
//
// Arguments:
//
//	axis: 0-D (scalar). Specifies the dimension index at which to
// expand the shape of `input`. Must be in the range
// `[-rank(input) - 1, rank(input)]`.
//
// Returns Contains the same data as `input`, but its shape has an additional
// dimension of size 1 added.
func ExpandDims(scope *Scope, input tf.Output, axis tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "ExpandDims",
		Input: []tf.Input{
			input, axis,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
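
// expandDimsExample is a hand-written usage sketch, not part of the
// machine-generated API above. It adds a leading batch dimension to a [2]
// tensor, producing shape [1, 2], mirroring
// `shape(expand_dims(t, 0)) ==> [1, 2]` above.
func expandDimsExample(scope *Scope) tf.Output {
	t := Const(scope.SubScope("t"), []float32{1, 2})
	axis := Const(scope.SubScope("axis"), int32(0))
	return ExpandDims(scope, t, axis)
}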

// A placeholder op that passes through `input` when its output is not fed.
//
// Arguments:
//	input: The default value to produce when `output` is not fed.
//	shape: The (possibly partial) shape of the tensor.
//
// Returns A placeholder tensor that defaults to `input` if it is not fed.
func PlaceholderWithDefault(scope *Scope, input tf.Output, shape tf.Shape) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"shape": shape}
	opspec := tf.OpSpec{
		Type: "PlaceholderWithDefault",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// A placeholder op for a value that will be fed into the computation.
//
// DEPRECATED at GraphDef version 23: Placeholder now behaves the same as PlaceholderV2.
//
// N.B. This operation will fail with an error if it is executed. It is
// intended as a way to represent a value that will always be fed, and to
// provide attrs that enable the fed value to be checked at runtime.
//
// Arguments:
//	dtype: The type of elements in the tensor.
//	shape: The shape of the tensor. The shape can be any partially-specified
// shape.  To be unconstrained, pass in a shape with unknown rank.
//
// Returns A placeholder tensor that must be replaced using the feed mechanism.
func PlaceholderV2(scope *Scope, dtype tf.DataType, shape tf.Shape) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"dtype": dtype, "shape": shape}
	opspec := tf.OpSpec{
		Type: "PlaceholderV2",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// PlaceholderAttr is an optional argument to Placeholder.
type PlaceholderAttr func(optionalAttr)

// PlaceholderShape sets the optional shape attribute to value.
//
// value: (Optional) The shape of the tensor. If the shape has 0 dimensions, the
// shape is unconstrained.
// If not specified, defaults to <unknown_rank:true >
func PlaceholderShape(value tf.Shape) PlaceholderAttr {
	return func(m optionalAttr) {
		m["shape"] = value
	}
}

// A placeholder op for a value that will be fed into the computation.
//
// N.B. This operation will fail with an error if it is executed. It is
// intended as a way to represent a value that will always be fed, and to
// provide attrs that enable the fed value to be checked at runtime.
//
// Arguments:
//	dtype: The type of elements in the tensor.
//
// Returns A placeholder tensor that must be replaced using the feed mechanism.
func Placeholder(scope *Scope, dtype tf.DataType, optional ...PlaceholderAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"dtype": dtype}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "Placeholder",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
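
// placeholderExample is a hand-written usage sketch, not part of the
// machine-generated API above. It declares a float placeholder constrained to
// an illustrative shape of [1, 28, 28, 3] via the optional shape attribute.
func placeholderExample(scope *Scope) tf.Output {
	return Placeholder(scope, tf.Float,
		PlaceholderShape(tf.MakeShape(1, 28, 28, 3)))
}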

// Gradient op for `MirrorPad` op. This op folds a mirror-padded tensor.
//
// This operation folds the padded areas of `input` by `MirrorPad` according to the
// `paddings` you specify. `paddings` must be the same as the `paddings` argument
// given to the corresponding `MirrorPad` op.
//
// The folded size of each dimension D of the output is:
//
// `input.dim_size(D) - paddings(D, 0) - paddings(D, 1)`
//
// For example:
//
// ```
// # 't' is [[1, 2, 3], [4, 5, 6], [7, 8, 9]].
// # 'paddings' is [[0, 1], [0, 1]].
// # 'mode' is SYMMETRIC.
// # rank of 't' is 2.
// pad(t, paddings) ==> [[ 1,  5]
//                       [11, 28]]
// ```
//
// Arguments:
//	input: The input tensor to be folded.
//	paddings: A two-column matrix specifying the padding sizes. The number of
// rows must be the same as the rank of `input`.
//	mode: The mode used in the `MirrorPad` op.
//
// Returns The folded tensor.
func MirrorPadGrad(scope *Scope, input tf.Output, paddings tf.Output, mode string) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"mode": mode}
	opspec := tf.OpSpec{
		Type: "MirrorPadGrad",
		Input: []tf.Input{
			input, paddings,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Pads a tensor with mirrored values.
//
// This operation pads an `input` with mirrored values according to the `paddings`
// you specify. `paddings` is an integer tensor with shape `[n, 2]`, where n is
// the rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates
// how many values to add before the contents of `input` in that dimension, and
// `paddings[D, 1]` indicates how many values to add after the contents of `input`
// in that dimension. Both `paddings[D, 0]` and `paddings[D, 1]` must be no greater
// than `input.dim_size(D)` (or `input.dim_size(D) - 1`) if `copy_border` is true
// (if false, respectively).
//
// The padded size of each dimension D of the output is:
//
// `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)`
//
// For example:
//
// ```
// # 't' is [[1, 2, 3], [4, 5, 6]].
// # 'paddings' is [[1, 1], [2, 2]].
// # 'mode' is SYMMETRIC.
// # rank of 't' is 2.
// pad(t, paddings) ==> [[2, 1, 1, 2, 3, 3, 2]
//                       [2, 1, 1, 2, 3, 3, 2]
//                       [5, 4, 4, 5, 6, 6, 5]
//                       [5, 4, 4, 5, 6, 6, 5]]
// ```
//
// Arguments:
//	input: The input tensor to be padded.
//	paddings: A two-column matrix specifying the padding sizes. The number of
// rows must be the same as the rank of `input`.
//	mode: Either `REFLECT` or `SYMMETRIC`. In reflect mode the padded regions
// do not include the borders, while in symmetric mode the padded regions
// do include the borders. For example, if `input` is `[1, 2, 3]` and `paddings`
// is `[0, 2]`, then the output is `[1, 2, 3, 2, 1]` in reflect mode, and
// it is `[1, 2, 3, 3, 2]` in symmetric mode.
//
// Returns The padded tensor.
func MirrorPad(scope *Scope, input tf.Output, paddings tf.Output, mode string) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"mode": mode}
	opspec := tf.OpSpec{
		Type: "MirrorPad",
		Input: []tf.Input{
			input, paddings,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
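
// mirrorPadExample is a hand-written Go version of the example above, not
// part of the machine-generated API. SYMMETRIC padding of a [2, 3] tensor
// with paddings [[1, 1], [2, 2]] yields the [4, 7] result shown.
func mirrorPadExample(scope *Scope) tf.Output {
	t := Const(scope.SubScope("t"), [][]int32{{1, 2, 3}, {4, 5, 6}})
	paddings := Const(scope.SubScope("paddings"), [][]int32{{1, 1}, {2, 2}})
	return MirrorPad(scope, t, paddings, "SYMMETRIC")
}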

// Pads a tensor.
//
// This operation pads `input` according to the `paddings` and `constant_values`
// you specify. `paddings` is an integer tensor with shape `[Dn, 2]`, where n is
// the rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates
// how many padding values to add before the contents of `input` in that dimension,
// and `paddings[D, 1]` indicates how many padding values to add after the contents
// of `input` in that dimension. `constant_values` is a scalar tensor of the same
// type as `input` that indicates the value to use for padding `input`.
//
// The padded size of each dimension D of the output is:
//
// `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)`
//
// For example:
//
// ```
// # 't' is [[1, 1], [2, 2]]
// # 'paddings' is [[1, 1], [2, 2]]
// # 'constant_values' is 0
// # rank of 't' is 2
// pad(t, paddings) ==> [[0, 0, 0, 0, 0, 0]
//                       [0, 0, 1, 1, 0, 0]
//                       [0, 0, 2, 2, 0, 0]
//                       [0, 0, 0, 0, 0, 0]]
// ```
func PadV2(scope *Scope, input tf.Output, paddings tf.Output, constant_values tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "PadV2",
		Input: []tf.Input{
			input, paddings, constant_values,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
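
// padV2Example is a hand-written Go version of the example above, not part of
// the machine-generated API. It pads a [2, 2] tensor with paddings
// [[1, 1], [2, 2]] and a constant value of 0, producing the [4, 6] result
// shown.
func padV2Example(scope *Scope) tf.Output {
	t := Const(scope.SubScope("t"), [][]int32{{1, 1}, {2, 2}})
	paddings := Const(scope.SubScope("paddings"), [][]int32{{1, 1}, {2, 2}})
	zero := Const(scope.SubScope("constant_values"), int32(0))
	return PadV2(scope, t, paddings, zero)
}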

// Return the reduction indices for computing gradients of s0 op s1 with broadcast.
//
// This is typically used by gradient computations for a broadcasting operation.
func BroadcastGradientArgs(scope *Scope, s0 tf.Output, s1 tf.Output) (r0 tf.Output, r1 tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "BroadcastGradientArgs",
		Input: []tf.Input{
			s0, s1,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1)
}

// Returns the gradient of `Tile`.
//
// DEPRECATED at GraphDef version 3: TileGrad has been replaced with reduce_sum
//
// Since `Tile` takes an input and repeats the input `multiples` times
// along each dimension, `TileGrad` takes in `multiples` and aggregates
// each repeated tile of `input` into `output`.
func TileGrad(scope *Scope, input tf.Output, multiples tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "TileGrad",
		Input: []tf.Input{
			input, multiples,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Constructs a tensor by tiling a given tensor.
//
// This operation creates a new tensor by replicating `input` `multiples` times.
// The output tensor's i'th dimension has `input.dims(i) * multiples[i]` elements,
// and the values of `input` are replicated `multiples[i]` times along the 'i'th
// dimension. For example, tiling `[a b c d]` by `[2]` produces
// `[a b c d a b c d]`.
//
// Arguments:
//	input: 1-D or higher.
//	multiples: 1-D. Length must be the same as the number of dimensions in `input`.
func Tile(scope *Scope, input tf.Output, multiples tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Tile",
		Input: []tf.Input{
			input, multiples,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
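
// tileExample is a hand-written usage sketch, not part of the
// machine-generated API above. Tiling ["a", "b", "c", "d"] by [2] produces
// ["a", "b", "c", "d", "a", "b", "c", "d"], as described above.
func tileExample(scope *Scope) tf.Output {
	input := Const(scope.SubScope("input"), []string{"a", "b", "c", "d"})
	multiples := Const(scope.SubScope("multiples"), []int32{2})
	return Tile(scope, input, multiples)
}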

// StridedSliceAttr is an optional argument to StridedSlice.
type StridedSliceAttr func(optionalAttr)

// StridedSliceBeginMask sets the optional begin_mask attribute to value.
//
// value: a bitmask where a bit i being 1 means to ignore the begin
// value and instead use the largest interval possible. At runtime
// begin[i] will be replaced with `[0, n-1)` if `stride[i] > 0` or
// `[-1, n-1]` if `stride[i] < 0`
// If not specified, defaults to 0
func StridedSliceBeginMask(value int64) StridedSliceAttr {
	return func(m optionalAttr) {
		m["begin_mask"] = value
	}
}

// StridedSliceEndMask sets the optional end_mask attribute to value.
//
// value: analogous to `begin_mask`
// If not specified, defaults to 0
func StridedSliceEndMask(value int64) StridedSliceAttr {
	return func(m optionalAttr) {
		m["end_mask"] = value
	}
}

// StridedSliceEllipsisMask sets the optional ellipsis_mask attribute to value.
//
// value: a bitmask where bit `i` being 1 means the `i`th
// position is actually an ellipsis. At most one bit can be 1.
// If `ellipsis_mask == 0`, then an implicit ellipsis mask of `1 << (m+1)`
// is provided. This means that `foo[3:5] == foo[3:5, ...]`. An ellipsis
// implicitly creates as many range specifications as necessary to fully
// specify the sliced range for every dimension. For example for a 4-dimensional
// tensor `foo` the slice `foo[2, ..., 5:8]` implies `foo[2, :, :, 5:8]`.
// If not specified, defaults to 0
func StridedSliceEllipsisMask(value int64) StridedSliceAttr {
	return func(m optionalAttr) {
		m["ellipsis_mask"] = value
	}
}

// StridedSliceNewAxisMask sets the optional new_axis_mask attribute to value.
//
// value: a bitmask where bit `i` being 1 means the `i`th
// specification creates a new shape 1 dimension. For example
// `foo[:4, tf.newaxis, :2]` would produce a shape `(4, 1, 2)` tensor.
// If not specified, defaults to 0
func StridedSliceNewAxisMask(value int64) StridedSliceAttr {
	return func(m optionalAttr) {
		m["new_axis_mask"] = value
	}
}

// StridedSliceShrinkAxisMask sets the optional shrink_axis_mask attribute to value.
//
// value: a bitmask where bit `i` implies that the `i`th
// specification should shrink the dimensionality. begin and end
// must imply a slice of size 1 in the dimension. For example in
// python one might do `foo[:, 3, :]` which would result in
// `shrink_axis_mask` being 2.
// If not specified, defaults to 0
func StridedSliceShrinkAxisMask(value int64) StridedSliceAttr {
	return func(m optionalAttr) {
		m["shrink_axis_mask"] = value
	}
}
1438
1439// Return a strided slice from `input`.
1440//
1441// Note, most python users will want to use the Python `Tensor.__getitem__`
1442// or `Variable.__getitem__` rather than this op directly.
1443//
1444// The goal of this op is to produce a new tensor with a subset of
1445// the elements from the `n` dimensional `input` tensor. The subset is chosen using
1446// a sequence of `m` sparse range specifications encoded into the arguments
1447// of this function. Note, in some cases
1448// `m` could be equal to `n`, but this need not be the case. Each
1449// range specification entry can be one of the following:
1450//
1451// - An ellipsis (...). Ellipses are used to imply zero or more
1452//   dimensions of full-dimension selection and are produced using
1453//   `ellipsis_mask`. For example, `foo[...]` is the identity slice.
1454//
1455// - A new axis. This is used to insert a new shape=1 dimension and is
1456//   produced using `new_axis_mask`. For example, `foo[:, ...]` where
1457//   `foo` is shape `(3, 4)` produces a `(1, 3, 4)` tensor.
1458//
1459//
1460// - A range `begin:end:stride`. This is used to specify how much to choose from
1461//   a given dimension. `stride` can be any integer but 0.  `begin` is an integer
1462//   which represents the index of the first value to select while `end` represents
1463//   the index of the last value to select. The number of values selected in each
1464//   dimension is `end - begin` if `stride > 0` and `begin - end` if `stride < 0`.
1465//   `begin` and `end` can be negative where `-1` is the last element, `-2` is
1466//   the second to last. `begin_mask` controls whether to replace the explicitly
1467//   given `begin` with an implicit effective value of `0` if `stride > 0` and
1468//   `-1` if `stride < 0`. `end_mask` is analogous but produces the number
1469//   required to create the largest open interval. For example, given a shape
1470//   `(3,)` tensor `foo[:]`, the effective `begin` and `end` are `0` and `3`. Do
1471//   not assume this is equivalent to `foo[0:-1]` which has an effective `begin`
1472//   and `end` of `0` and `2`. Another example is `foo[-2::-1]` which reverses the
1473//   first dimension of a tensor while dropping the last two (in the original
1474//   order elements). For example `foo = [1,2,3,4]; foo[-2::-1]` is `[4,3]`.
1475//
1476// - A single index. This is used to keep only elements that have a given
1477//   index. For example (`foo[2, :]` on a shape `(5,6)` tensor produces a
1478//   shape `(6,)` tensor. This is encoded in `begin` and `end` and
1479//   `shrink_axis_mask`.
1480//
1481// Each conceptual range specification is encoded in the op's argument. This
// encoding is best understood by considering a non-trivial example. In
1483// particular,
1484// `foo[1, 2:4, None, ..., :-3:-1, :]` will be encoded as
1485//
1486// ```
1487// begin = [1, 2, x, x, 0, x] # x denotes don't care (usually 0)
1488// end = [2, 4, x, x, -3, x]
1489// strides = [1, 1, x, x, -1, 1]
1490// begin_mask = 1<<4 | 1 << 5 = 48
1491// end_mask = 1<<5 = 32
1492// ellipsis_mask = 1<<3 = 8
// new_axis_mask = 1<<2 = 4
// shrink_axis_mask = 1<<0 = 1
1495// ```
1496//
1497// In this case if `foo.shape` is (5, 5, 5, 5, 5, 5) the final shape of
1498// the slice becomes (2, 1, 5, 5, 2, 5).
1499// Let us walk step by step through each argument specification.
1500//
1501// 1.  The first argument in the example slice is turned into `begin = 1` and
1502// `end = begin + 1 = 2`. To disambiguate from the original spec `2:4` we
1503// also set the appropriate bit in `shrink_axis_mask`.
1504//
// 2. `2:4` contributes 2, 4, 1 to begin, end, and stride. All masks have
1506// zero bits contributed.
1507//
// 3. None is a synonym for `tf.newaxis`. This means insert a dimension of size 1
// in the final shape. Dummy values are contributed to begin,
1510// end and stride, while the new_axis_mask bit is set.
1511//
// 4. `...` grabs the full ranges from as many dimensions as needed to
1513// fully specify a slice for every dimension of the input shape.
1514//
1515// 5. `:-3:-1` shows the use of negative indices. A negative index `i` associated
1516// with a dimension that has shape `s` is converted to a positive index
1517// `s + i`. So `-1` becomes `s-1` (i.e. the last element). This conversion
1518// is done internally so begin, end and strides receive x, -3, and -1.
1519// The appropriate begin_mask bit is set to indicate the start range is the
1520// full range (ignoring the x).
1521//
1522// 6. `:` indicates that the entire contents of the corresponding dimension
1523// is selected. This is equivalent to `::` or `0::1`. begin, end, and strides
1524// receive 0, 0, and 1, respectively. The appropriate bits in `begin_mask` and
1525// `end_mask` are also set.
1526//
1527// *Requirements*:
1528//   `0 != strides[i] for i in [0, m)`
1529//   `ellipsis_mask must be a power of two (only one ellipsis)`
1530//
1531// Arguments:
1532//
1533//	begin: `begin[k]` specifies the offset into the `k`th range specification.
1534// The exact dimension this corresponds to will be determined by context.
// Out-of-bounds values will be silently clamped. If the `k`th bit of
// `begin_mask` is set then `begin[k]` is ignored and the full range of the
// appropriate dimension is used instead. Negative values cause indexing
// to start from the highest element, e.g. if `foo==[1,2,3]` then `foo[-1]==3`.
1539//	end: `end[i]` is like `begin` with the exception that `end_mask` is
1540// used to determine full ranges.
//	strides: `strides[i]` specifies the increment in the `i`th specification
// after extracting a given element. Negative values will reverse
// the original order. Out-of-range values are
// clamped to `[0,dim[i])` if `strides[i] > 0` or `[-1,dim[i]-1]` if `strides[i] < 0`.
1545func StridedSlice(scope *Scope, input tf.Output, begin tf.Output, end tf.Output, strides tf.Output, optional ...StridedSliceAttr) (output tf.Output) {
1546	if scope.Err() != nil {
1547		return
1548	}
1549	attrs := map[string]interface{}{}
1550	for _, a := range optional {
1551		a(attrs)
1552	}
1553	opspec := tf.OpSpec{
1554		Type: "StridedSlice",
1555		Input: []tf.Input{
1556			input, begin, end, strides,
1557		},
1558		Attrs: attrs,
1559	}
1560	op := scope.AddOperation(opspec)
1561	return op.Output(0)
1562}
1563
1564// SizeAttr is an optional argument to Size.
1565type SizeAttr func(optionalAttr)
1566
1567// SizeOutType sets the optional out_type attribute to value.
1568// If not specified, defaults to DT_INT32
1569func SizeOutType(value tf.DataType) SizeAttr {
1570	return func(m optionalAttr) {
1571		m["out_type"] = value
1572	}
1573}
1574
1575// Returns the size of a tensor.
1576//
1577// This operation returns an integer representing the number of elements in
1578// `input`.
1579//
1580// For example:
1581//
1582// ```
// # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
1584// size(t) ==> 12
1585// ```
1586func Size(scope *Scope, input tf.Output, optional ...SizeAttr) (output tf.Output) {
1587	if scope.Err() != nil {
1588		return
1589	}
1590	attrs := map[string]interface{}{}
1591	for _, a := range optional {
1592		a(attrs)
1593	}
1594	opspec := tf.OpSpec{
1595		Type: "Size",
1596		Input: []tf.Input{
1597			input,
1598		},
1599		Attrs: attrs,
1600	}
1601	op := scope.AddOperation(opspec)
1602	return op.Output(0)
1603}
1604
1605// Returns the rank of a tensor.
1606//
1607// This operation returns an integer representing the rank of `input`.
1608//
1609// For example:
1610//
1611// ```
1612// # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
1613// # shape of tensor 't' is [2, 2, 3]
1614// rank(t) ==> 3
1615// ```
1616//
1617// **Note**: The rank of a tensor is not the same as the rank of a matrix. The rank
1618// of a tensor is the number of indices required to uniquely select each element
1619// of the tensor. Rank is also known as "order", "degree", or "ndims."
1620func Rank(scope *Scope, input tf.Output) (output tf.Output) {
1621	if scope.Err() != nil {
1622		return
1623	}
1624	opspec := tf.OpSpec{
1625		Type: "Rank",
1626		Input: []tf.Input{
1627			input,
1628		},
1629	}
1630	op := scope.AddOperation(opspec)
1631	return op.Output(0)
1632}
1633
1634// ReverseSequenceAttr is an optional argument to ReverseSequence.
1635type ReverseSequenceAttr func(optionalAttr)
1636
1637// ReverseSequenceBatchDim sets the optional batch_dim attribute to value.
1638//
1639// value: The dimension along which reversal is performed.
1640// If not specified, defaults to 0
1641func ReverseSequenceBatchDim(value int64) ReverseSequenceAttr {
1642	return func(m optionalAttr) {
1643		m["batch_dim"] = value
1644	}
1645}
1646
1647// Reverses variable length slices.
1648//
1649// This op first slices `input` along the dimension `batch_dim`, and for each
1650// slice `i`, reverses the first `seq_lengths[i]` elements along
1651// the dimension `seq_dim`.
1652//
1653// The elements of `seq_lengths` must obey `seq_lengths[i] <= input.dims[seq_dim]`,
1654// and `seq_lengths` must be a vector of length `input.dims[batch_dim]`.
1655//
1656// The output slice `i` along dimension `batch_dim` is then given by input
1657// slice `i`, with the first `seq_lengths[i]` slices along dimension
1658// `seq_dim` reversed.
1659//
1660// For example:
1661//
1662// ```
1663// # Given this:
1664// batch_dim = 0
1665// seq_dim = 1
1666// input.dims = (4, 8, ...)
1667// seq_lengths = [7, 2, 3, 5]
1668//
1669// # then slices of input are reversed on seq_dim, but only up to seq_lengths:
1670// output[0, 0:7, :, ...] = input[0, 7:0:-1, :, ...]
1671// output[1, 0:2, :, ...] = input[1, 2:0:-1, :, ...]
1672// output[2, 0:3, :, ...] = input[2, 3:0:-1, :, ...]
1673// output[3, 0:5, :, ...] = input[3, 5:0:-1, :, ...]
1674//
1675// # while entries past seq_lens are copied through:
1676// output[0, 7:, :, ...] = input[0, 7:, :, ...]
1677// output[1, 2:, :, ...] = input[1, 2:, :, ...]
1678// output[2, 3:, :, ...] = input[2, 3:, :, ...]
// output[3, 5:, :, ...] = input[3, 5:, :, ...]
1680// ```
1681//
1682// In contrast, if:
1683//
1684// ```
1685// # Given this:
1686// batch_dim = 2
1687// seq_dim = 0
1688// input.dims = (8, ?, 4, ...)
1689// seq_lengths = [7, 2, 3, 5]
1690//
1691// # then slices of input are reversed on seq_dim, but only up to seq_lengths:
1692// output[0:7, :, 0, :, ...] = input[7:0:-1, :, 0, :, ...]
1693// output[0:2, :, 1, :, ...] = input[2:0:-1, :, 1, :, ...]
1694// output[0:3, :, 2, :, ...] = input[3:0:-1, :, 2, :, ...]
1695// output[0:5, :, 3, :, ...] = input[5:0:-1, :, 3, :, ...]
1696//
1697// # while entries past seq_lens are copied through:
1698// output[7:, :, 0, :, ...] = input[7:, :, 0, :, ...]
1699// output[2:, :, 1, :, ...] = input[2:, :, 1, :, ...]
1700// output[3:, :, 2, :, ...] = input[3:, :, 2, :, ...]
// output[5:, :, 3, :, ...] = input[5:, :, 3, :, ...]
1702// ```
1703//
1704// Arguments:
1705//	input: The input to reverse.
1706//	seq_lengths: 1-D with length `input.dims(batch_dim)` and
1707// `max(seq_lengths) <= input.dims(seq_dim)`
1708//	seq_dim: The dimension which is partially reversed.
1709//
1710// Returns The partially reversed input. It has the same shape as `input`.
1711func ReverseSequence(scope *Scope, input tf.Output, seq_lengths tf.Output, seq_dim int64, optional ...ReverseSequenceAttr) (output tf.Output) {
1712	if scope.Err() != nil {
1713		return
1714	}
1715	attrs := map[string]interface{}{"seq_dim": seq_dim}
1716	for _, a := range optional {
1717		a(attrs)
1718	}
1719	opspec := tf.OpSpec{
1720		Type: "ReverseSequence",
1721		Input: []tf.Input{
1722			input, seq_lengths,
1723		},
1724		Attrs: attrs,
1725	}
1726	op := scope.AddOperation(opspec)
1727	return op.Output(0)
1728}
1729
1730// Ensures that the tensor's shape matches the expected shape.
1731//
1732// Raises an error if the input tensor's shape does not match the specified shape.
1733// Returns the input tensor otherwise.
1734//
1735// Arguments:
1736//	input: A tensor, whose shape is to be validated.
1737//	shape: The expected (possibly partially specified) shape of the input tensor.
1738//
1739// Returns A tensor with the same shape and contents as the input tensor or value.
1740func EnsureShape(scope *Scope, input tf.Output, shape tf.Shape) (output tf.Output) {
1741	if scope.Err() != nil {
1742		return
1743	}
1744	attrs := map[string]interface{}{"shape": shape}
1745	opspec := tf.OpSpec{
1746		Type: "EnsureShape",
1747		Input: []tf.Input{
1748			input,
1749		},
1750		Attrs: attrs,
1751	}
1752	op := scope.AddOperation(opspec)
1753	return op.Output(0)
1754}
1755
1756// UniqueWithCountsV2Attr is an optional argument to UniqueWithCountsV2.
1757type UniqueWithCountsV2Attr func(optionalAttr)
1758
1759// UniqueWithCountsV2OutIdx sets the optional out_idx attribute to value.
1760// If not specified, defaults to DT_INT32
1761func UniqueWithCountsV2OutIdx(value tf.DataType) UniqueWithCountsV2Attr {
1762	return func(m optionalAttr) {
1763		m["out_idx"] = value
1764	}
1765}
1766
1767// Finds unique elements along an axis of a tensor.
1768//
// This operation returns a tensor `y` containing the unique elements
// along the `axis` of a tensor. The returned unique elements are sorted
// in the same order as they occur along `axis` in `x`.
// This operation also returns a tensor `idx` and a tensor `count`,
// each the same size as the number of elements in `x` along the
// `axis` dimension. `idx` contains the index into the unique output `y`,
// and `count` contains the count of each element of `y` in `x`.
// In other words, for a `1-D` tensor `x` with `axis = None`:
1777//
1778// `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]`
1779//
1780// For example:
1781//
1782// ```
1783// # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]
1784// y, idx, count = unique_with_counts(x)
1785// y ==> [1, 2, 4, 7, 8]
1786// idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]
1787// count ==> [2, 1, 3, 1, 2]
1788// ```
1789//
// For a `2-D` tensor `x` with `axis = 0`:
1791//
1792// ```
1793// # tensor 'x' is [[1, 0, 0],
1794// #                [1, 0, 0],
1795// #                [2, 0, 0]]
1796// y, idx, count = unique_with_counts(x, axis=0)
1797// y ==> [[1, 0, 0],
1798//        [2, 0, 0]]
1799// idx ==> [0, 0, 1]
1800// count ==> [2, 1]
1801// ```
1802//
// For a `2-D` tensor `x` with `axis = 1`:
1804//
1805// ```
1806// # tensor 'x' is [[1, 0, 0],
1807// #                [1, 0, 0],
1808// #                [2, 0, 0]]
1809// y, idx, count = unique_with_counts(x, axis=1)
1810// y ==> [[1, 0],
1811//        [1, 0],
1812//        [2, 0]]
1813// idx ==> [0, 1, 1]
1814// count ==> [1, 2]
1815// ```
1816//
1817// Arguments:
1818//	x: A `Tensor`.
1819//	axis: A `Tensor` of type `int32` (default: None). The axis of the Tensor to
1820// find the unique elements.
1821//
// Returns:
//	y: A `Tensor`. Unique elements along the `axis` of `Tensor` x.
//	idx: A 1-D Tensor of the same type as x that contains the index of each
// value of x in the output y.
//	count: A 1-D Tensor. The count of each value of x in the output y.
1824func UniqueWithCountsV2(scope *Scope, x tf.Output, axis tf.Output, optional ...UniqueWithCountsV2Attr) (y tf.Output, idx tf.Output, count tf.Output) {
1825	if scope.Err() != nil {
1826		return
1827	}
1828	attrs := map[string]interface{}{}
1829	for _, a := range optional {
1830		a(attrs)
1831	}
1832	opspec := tf.OpSpec{
1833		Type: "UniqueWithCountsV2",
1834		Input: []tf.Input{
1835			x, axis,
1836		},
1837		Attrs: attrs,
1838	}
1839	op := scope.AddOperation(opspec)
1840	return op.Output(0), op.Output(1), op.Output(2)
1841}
1842
1843// UniqueWithCountsAttr is an optional argument to UniqueWithCounts.
1844type UniqueWithCountsAttr func(optionalAttr)
1845
1846// UniqueWithCountsOutIdx sets the optional out_idx attribute to value.
1847// If not specified, defaults to DT_INT32
1848func UniqueWithCountsOutIdx(value tf.DataType) UniqueWithCountsAttr {
1849	return func(m optionalAttr) {
1850		m["out_idx"] = value
1851	}
1852}
1853
1854// Finds unique elements in a 1-D tensor.
1855//
1856// This operation returns a tensor `y` containing all of the unique elements of `x`
1857// sorted in the same order that they occur in `x`. This operation also returns a
1858// tensor `idx` the same size as `x` that contains the index of each value of `x`
1859// in the unique output `y`. Finally, it returns a third tensor `count` that
1860// contains the count of each element of `y` in `x`. In other words:
1861//
1862// `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]`
1863//
1864// For example:
1865//
1866// ```
1867// # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]
1868// y, idx, count = unique_with_counts(x)
1869// y ==> [1, 2, 4, 7, 8]
1870// idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]
1871// count ==> [2, 1, 3, 1, 2]
1872// ```
1873//
1874// Arguments:
1875//	x: 1-D.
1876//
// Returns:
//	y: 1-D.
//	idx: 1-D.
//	count: 1-D.
1878func UniqueWithCounts(scope *Scope, x tf.Output, optional ...UniqueWithCountsAttr) (y tf.Output, idx tf.Output, count tf.Output) {
1879	if scope.Err() != nil {
1880		return
1881	}
1882	attrs := map[string]interface{}{}
1883	for _, a := range optional {
1884		a(attrs)
1885	}
1886	opspec := tf.OpSpec{
1887		Type: "UniqueWithCounts",
1888		Input: []tf.Input{
1889			x,
1890		},
1891		Attrs: attrs,
1892	}
1893	op := scope.AddOperation(opspec)
1894	return op.Output(0), op.Output(1), op.Output(2)
1895}
1896
1897// UniqueV2Attr is an optional argument to UniqueV2.
1898type UniqueV2Attr func(optionalAttr)
1899
1900// UniqueV2OutIdx sets the optional out_idx attribute to value.
1901// If not specified, defaults to DT_INT32
1902func UniqueV2OutIdx(value tf.DataType) UniqueV2Attr {
1903	return func(m optionalAttr) {
1904		m["out_idx"] = value
1905	}
1906}
1907
1908// Finds unique elements along an axis of a tensor.
1909//
// This operation returns a tensor `y` containing the unique elements
// along the `axis` of a tensor. The returned unique elements are sorted
// in the same order as they occur along `axis` in `x`.
// This operation also returns a tensor `idx` that is the same size as
// the number of elements in `x` along the `axis` dimension. It
// contains the index into the unique output `y`.
// In other words, for a `1-D` tensor `x` with `axis = None`:
1917//
1918// `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]`
1919//
1920// For example:
1921//
1922// ```
1923// # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]
1924// y, idx = unique(x)
1925// y ==> [1, 2, 4, 7, 8]
1926// idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]
1927// ```
1928//
// For a `2-D` tensor `x` with `axis = 0`:
1930//
1931// ```
1932// # tensor 'x' is [[1, 0, 0],
1933// #                [1, 0, 0],
1934// #                [2, 0, 0]]
1935// y, idx = unique(x, axis=0)
1936// y ==> [[1, 0, 0],
1937//        [2, 0, 0]]
1938// idx ==> [0, 0, 1]
1939// ```
1940//
// For a `2-D` tensor `x` with `axis = 1`:
1942//
1943// ```
1944// # tensor 'x' is [[1, 0, 0],
1945// #                [1, 0, 0],
1946// #                [2, 0, 0]]
1947// y, idx = unique(x, axis=1)
1948// y ==> [[1, 0],
1949//        [1, 0],
1950//        [2, 0]]
1951// idx ==> [0, 1, 1]
1952// ```
1953//
1954// Arguments:
1955//	x: A `Tensor`.
1956//	axis: A `Tensor` of type `int32` (default: None). The axis of the Tensor to
1957// find the unique elements.
1958//
// Returns:
//	y: A `Tensor`. Unique elements along the `axis` of `Tensor` x.
//	idx: A 1-D Tensor of the same type as x that contains the index of each
// value of x in the output y.
1961func UniqueV2(scope *Scope, x tf.Output, axis tf.Output, optional ...UniqueV2Attr) (y tf.Output, idx tf.Output) {
1962	if scope.Err() != nil {
1963		return
1964	}
1965	attrs := map[string]interface{}{}
1966	for _, a := range optional {
1967		a(attrs)
1968	}
1969	opspec := tf.OpSpec{
1970		Type: "UniqueV2",
1971		Input: []tf.Input{
1972			x, axis,
1973		},
1974		Attrs: attrs,
1975	}
1976	op := scope.AddOperation(opspec)
1977	return op.Output(0), op.Output(1)
1978}
1979
1980// UniqueAttr is an optional argument to Unique.
1981type UniqueAttr func(optionalAttr)
1982
1983// UniqueOutIdx sets the optional out_idx attribute to value.
1984// If not specified, defaults to DT_INT32
1985func UniqueOutIdx(value tf.DataType) UniqueAttr {
1986	return func(m optionalAttr) {
1987		m["out_idx"] = value
1988	}
1989}
1990
1991// Finds unique elements in a 1-D tensor.
1992//
1993// This operation returns a tensor `y` containing all of the unique elements of `x`
1994// sorted in the same order that they occur in `x`. This operation also returns a
1995// tensor `idx` the same size as `x` that contains the index of each value of `x`
1996// in the unique output `y`. In other words:
1997//
1998// `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]`
1999//
2000// For example:
2001//
2002// ```
2003// # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]
2004// y, idx = unique(x)
2005// y ==> [1, 2, 4, 7, 8]
2006// idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]
2007// ```
2008//
2009// Arguments:
2010//	x: 1-D.
2011//
// Returns:
//	y: 1-D.
//	idx: 1-D.
2013func Unique(scope *Scope, x tf.Output, optional ...UniqueAttr) (y tf.Output, idx tf.Output) {
2014	if scope.Err() != nil {
2015		return
2016	}
2017	attrs := map[string]interface{}{}
2018	for _, a := range optional {
2019		a(attrs)
2020	}
2021	opspec := tf.OpSpec{
2022		Type: "Unique",
2023		Input: []tf.Input{
2024			x,
2025		},
2026		Attrs: attrs,
2027	}
2028	op := scope.AddOperation(opspec)
2029	return op.Output(0), op.Output(1)
2030}
2031
2032// Shuffle dimensions of x according to a permutation and conjugate the result.
2033//
2034// The output `y` has the same rank as `x`. The shapes of `x` and `y` satisfy:
2035//   `y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]`
2036//   `y[i,j,k,...,s,t,u] == conj(x[perm[i], perm[j], perm[k],...,perm[s], perm[t], perm[u]])`
2037func ConjugateTranspose(scope *Scope, x tf.Output, perm tf.Output) (y tf.Output) {
2038	if scope.Err() != nil {
2039		return
2040	}
2041	opspec := tf.OpSpec{
2042		Type: "ConjugateTranspose",
2043		Input: []tf.Input{
2044			x, perm,
2045		},
2046	}
2047	op := scope.AddOperation(opspec)
2048	return op.Output(0)
2049}
2050
2051// Reshapes a tensor.
2052//
2053// Given `tensor`, this operation returns a tensor that has the same values
2054// as `tensor` with shape `shape`.
2055//
2056// If one component of `shape` is the special value -1, the size of that dimension
2057// is computed so that the total size remains constant.  In particular, a `shape`
2058// of `[-1]` flattens into 1-D.  At most one component of `shape` can be -1.
2059//
2060// If `shape` is 1-D or higher, then the operation returns a tensor with shape
2061// `shape` filled with the values of `tensor`. In this case, the number of elements
2062// implied by `shape` must be the same as the number of elements in `tensor`.
2063//
2064// For example:
2065//
2066// ```
2067// # tensor 't' is [1, 2, 3, 4, 5, 6, 7, 8, 9]
2068// # tensor 't' has shape [9]
2069// reshape(t, [3, 3]) ==> [[1, 2, 3],
2070//                         [4, 5, 6],
2071//                         [7, 8, 9]]
2072//
2073// # tensor 't' is [[[1, 1], [2, 2]],
2074// #                [[3, 3], [4, 4]]]
2075// # tensor 't' has shape [2, 2, 2]
2076// reshape(t, [2, 4]) ==> [[1, 1, 2, 2],
2077//                         [3, 3, 4, 4]]
2078//
2079// # tensor 't' is [[[1, 1, 1],
2080// #                 [2, 2, 2]],
2081// #                [[3, 3, 3],
2082// #                 [4, 4, 4]],
2083// #                [[5, 5, 5],
2084// #                 [6, 6, 6]]]
2085// # tensor 't' has shape [3, 2, 3]
2086// # pass '[-1]' to flatten 't'
2087// reshape(t, [-1]) ==> [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6]
2088//
2089// # -1 can also be used to infer the shape
2090//
2091// # -1 is inferred to be 9:
2092// reshape(t, [2, -1]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3],
2093//                          [4, 4, 4, 5, 5, 5, 6, 6, 6]]
2094// # -1 is inferred to be 2:
2095// reshape(t, [-1, 9]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3],
2096//                          [4, 4, 4, 5, 5, 5, 6, 6, 6]]
2097// # -1 is inferred to be 3:
2098// reshape(t, [ 2, -1, 3]) ==> [[[1, 1, 1],
2099//                               [2, 2, 2],
2100//                               [3, 3, 3]],
2101//                              [[4, 4, 4],
2102//                               [5, 5, 5],
2103//                               [6, 6, 6]]]
2104//
2105// # tensor 't' is [7]
2106// # shape `[]` reshapes to a scalar
2107// reshape(t, []) ==> 7
2108// ```
2109//
2110// Arguments:
2111//
2112//	shape: Defines the shape of the output tensor.
2113func Reshape(scope *Scope, tensor tf.Output, shape tf.Output) (output tf.Output) {
2114	if scope.Err() != nil {
2115		return
2116	}
2117	opspec := tf.OpSpec{
2118		Type: "Reshape",
2119		Input: []tf.Input{
2120			tensor, shape,
2121		},
2122	}
2123	op := scope.AddOperation(opspec)
2124	return op.Output(0)
2125}
2126
2127// Checks a tensor for NaN and Inf values.
2128//
2129// When run, reports an `InvalidArgument` error if `tensor` has any values
2130// that are not a number (NaN) or infinity (Inf). Otherwise, passes `tensor` as-is.
2131//
2132// Arguments:
2133//
2134//	message: Prefix of the error message.
2135func CheckNumerics(scope *Scope, tensor tf.Output, message string) (output tf.Output) {
2136	if scope.Err() != nil {
2137		return
2138	}
2139	attrs := map[string]interface{}{"message": message}
2140	opspec := tf.OpSpec{
2141		Type: "CheckNumerics",
2142		Input: []tf.Input{
2143			tensor,
2144		},
2145		Attrs: attrs,
2146	}
2147	op := scope.AddOperation(opspec)
2148	return op.Output(0)
2149}
2150
2151// Gather slices from `params` into a Tensor with shape specified by `indices`.
2152//
// `indices` is a K-dimensional integer tensor, best thought of as a
2154// (K-1)-dimensional tensor of indices into `params`, where each element defines a
2155// slice of `params`:
2156//
//     output[i_0, ..., i_{K-2}] = params[indices[i_0, ..., i_{K-2}]]
2158//
2159// Whereas in `tf.gather` `indices` defines slices into the first
2160// dimension of `params`, in `tf.gather_nd`, `indices` defines slices into the
2161// first `N` dimensions of `params`, where `N = indices.shape[-1]`.
2162//
2163// The last dimension of `indices` can be at most the rank of
2164// `params`:
2165//
2166//     indices.shape[-1] <= params.rank
2167//
2168// The last dimension of `indices` corresponds to elements
2169// (if `indices.shape[-1] == params.rank`) or slices
2170// (if `indices.shape[-1] < params.rank`) along dimension `indices.shape[-1]`
2171// of `params`.  The output tensor has shape
2172//
2173//     indices.shape[:-1] + params.shape[indices.shape[-1]:]
2174//
2175// Note that on CPU, if an out of bound index is found, an error is returned.
2176// On GPU, if an out of bound index is found, a 0 is stored in the
2177// corresponding output value.
2178//
2179// Some examples below.
2180//
2181// Simple indexing into a matrix:
2182//
2183// ```python
2184//     indices = [[0, 0], [1, 1]]
2185//     params = [['a', 'b'], ['c', 'd']]
2186//     output = ['a', 'd']
2187// ```
2188//
2189// Slice indexing into a matrix:
2190//
2191// ```python
2192//     indices = [[1], [0]]
2193//     params = [['a', 'b'], ['c', 'd']]
2194//     output = [['c', 'd'], ['a', 'b']]
2195// ```
2196//
2197// Indexing into a 3-tensor:
2198//
2199// ```python
2200//     indices = [[1]]
2201//     params = [[['a0', 'b0'], ['c0', 'd0']],
2202//               [['a1', 'b1'], ['c1', 'd1']]]
2203//     output = [[['a1', 'b1'], ['c1', 'd1']]]
2204//
2205//
2206//     indices = [[0, 1], [1, 0]]
2207//     params = [[['a0', 'b0'], ['c0', 'd0']],
2208//               [['a1', 'b1'], ['c1', 'd1']]]
2209//     output = [['c0', 'd0'], ['a1', 'b1']]
2210//
2211//
2212//     indices = [[0, 0, 1], [1, 0, 1]]
2213//     params = [[['a0', 'b0'], ['c0', 'd0']],
2214//               [['a1', 'b1'], ['c1', 'd1']]]
2215//     output = ['b0', 'b1']
2216// ```
2217//
2218// Batched indexing into a matrix:
2219//
2220// ```python
2221//     indices = [[[0, 0]], [[0, 1]]]
2222//     params = [['a', 'b'], ['c', 'd']]
2223//     output = [['a'], ['b']]
2224// ```
2225//
2226// Batched slice indexing into a matrix:
2227//
2228// ```python
2229//     indices = [[[1]], [[0]]]
2230//     params = [['a', 'b'], ['c', 'd']]
2231//     output = [[['c', 'd']], [['a', 'b']]]
2232// ```
2233//
2234// Batched indexing into a 3-tensor:
2235//
2236// ```python
2237//     indices = [[[1]], [[0]]]
2238//     params = [[['a0', 'b0'], ['c0', 'd0']],
2239//               [['a1', 'b1'], ['c1', 'd1']]]
2240//     output = [[[['a1', 'b1'], ['c1', 'd1']]],
2241//               [[['a0', 'b0'], ['c0', 'd0']]]]
2242//
2243//     indices = [[[0, 1], [1, 0]], [[0, 0], [1, 1]]]
2244//     params = [[['a0', 'b0'], ['c0', 'd0']],
2245//               [['a1', 'b1'], ['c1', 'd1']]]
2246//     output = [[['c0', 'd0'], ['a1', 'b1']],
2247//               [['a0', 'b0'], ['c1', 'd1']]]
2248//
2249//
2250//     indices = [[[0, 0, 1], [1, 0, 1]], [[0, 1, 1], [1, 1, 0]]]
2251//     params = [[['a0', 'b0'], ['c0', 'd0']],
2252//               [['a1', 'b1'], ['c1', 'd1']]]
2253//     output = [['b0', 'b1'], ['d0', 'c1']]
2254// ```
2255//
2256// See also `tf.gather` and `tf.batch_gather`.
2257//
2258// Arguments:
2259//	params: The tensor from which to gather values.
2260//	indices: Index tensor.
2261//
2262// Returns Values from `params` gathered from indices given by `indices`, with
2263// shape `indices.shape[:-1] + params.shape[indices.shape[-1]:]`.
2264func GatherNd(scope *Scope, params tf.Output, indices tf.Output) (output tf.Output) {
2265	if scope.Err() != nil {
2266		return
2267	}
2268	opspec := tf.OpSpec{
2269		Type: "GatherNd",
2270		Input: []tf.Input{
2271			params, indices,
2272		},
2273	}
2274	op := scope.AddOperation(opspec)
2275	return op.Output(0)
2276}
2277
2278// GatherAttr is an optional argument to Gather.
2279type GatherAttr func(optionalAttr)
2280
2281// GatherValidateIndices sets the optional validate_indices attribute to value.
2282// If not specified, defaults to true
2283func GatherValidateIndices(value bool) GatherAttr {
2284	return func(m optionalAttr) {
2285		m["validate_indices"] = value
2286	}
2287}
2288
2289// Gather slices from `params` according to `indices`.
2290//
2291// `indices` must be an integer tensor of any dimension (usually 0-D or 1-D).
2292// Produces an output tensor with shape `indices.shape + params.shape[1:]` where:
2293//
2294// ```python
2295//     # Scalar indices
2296//     output[:, ..., :] = params[indices, :, ... :]
2297//
2298//     # Vector indices
2299//     output[i, :, ..., :] = params[indices[i], :, ... :]
2300//
2301//     # Higher rank indices
2302//     output[i, ..., j, :, ... :] = params[indices[i, ..., j], :, ..., :]
2303// ```
2304//
2305// If `indices` is a permutation and `len(indices) == params.shape[0]` then
2306// this operation will permute `params` accordingly.
2307//
2308// `validate_indices`: DEPRECATED. If this operation is assigned to CPU, values in
2309// `indices` are always validated to be within range. If assigned to GPU,
2310// out-of-bound indices result in safe but unspecified behavior, which may include
2311// raising an error.
2312//
// See https://www.tensorflow.org/images/Gather.png for an illustration.
2316func Gather(scope *Scope, params tf.Output, indices tf.Output, optional ...GatherAttr) (output tf.Output) {
2317	if scope.Err() != nil {
2318		return
2319	}
2320	attrs := map[string]interface{}{}
2321	for _, a := range optional {
2322		a(attrs)
2323	}
2324	opspec := tf.OpSpec{
2325		Type: "Gather",
2326		Input: []tf.Input{
2327			params, indices,
2328		},
2329		Attrs: attrs,
2330	}
2331	op := scope.AddOperation(opspec)
2332	return op.Output(0)
2333}
2334
2335// LowerBoundAttr is an optional argument to LowerBound.
2336type LowerBoundAttr func(optionalAttr)
2337
2338// LowerBoundOutType sets the optional out_type attribute to value.
2339// If not specified, defaults to DT_INT32
2340func LowerBoundOutType(value tf.DataType) LowerBoundAttr {
2341	return func(m optionalAttr) {
2342		m["out_type"] = value
2343	}
2344}
2345
2346// Applies lower_bound(sorted_search_values, values) along each row.
2347//
2348// Each set of rows with the same index in (sorted_inputs, values) is treated
2349// independently.  The resulting row is the equivalent of calling
2350// `np.searchsorted(sorted_inputs, values, side='left')`.
2351//
2352// The result is not a global index to the entire
2353// `Tensor`, but rather just the index in the last dimension.
2354//
2355// A 2-D example:
2356//   sorted_sequence = [[0, 3, 9, 9, 10],
2357//                      [1, 2, 3, 4, 5]]
2358//   values = [[2, 4, 9],
2359//             [0, 2, 6]]
2360//
2361//   result = LowerBound(sorted_sequence, values)
2362//
2363//   result == [[1, 2, 2],
2364//              [0, 1, 5]]
2365//
2366// Arguments:
2367//	sorted_inputs: 2-D Tensor where each row is ordered.
//	values: 2-D Tensor with the same number of rows as `sorted_inputs`. Contains
// the values that will be searched for in `sorted_inputs`.
2370//
2371// Returns A `Tensor` with the same shape as `values`.  It contains the first scalar index
2372// into the last dimension where values can be inserted without changing the
2373// ordered property.
2374func LowerBound(scope *Scope, sorted_inputs tf.Output, values tf.Output, optional ...LowerBoundAttr) (output tf.Output) {
2375	if scope.Err() != nil {
2376		return
2377	}
2378	attrs := map[string]interface{}{}
2379	for _, a := range optional {
2380		a(attrs)
2381	}
2382	opspec := tf.OpSpec{
2383		Type: "LowerBound",
2384		Input: []tf.Input{
2385			sorted_inputs, values,
2386		},
2387		Attrs: attrs,
2388	}
2389	op := scope.AddOperation(opspec)
2390	return op.Output(0)
2391}
2392
2393// Creates a tensor filled with a scalar value.
2394//
2395// This operation creates a tensor of shape `dims` and fills it with `value`.
2396//
2397// For example:
2398//
2399// ```
2400// # Output tensor has shape [2, 3].
// fill([2, 3], 9) ==> [[9, 9, 9],
//                      [9, 9, 9]]
2403// ```
2404//
2405// `tf.fill` differs from `tf.constant` in a few ways:
2406//
2407// *   `tf.fill` only supports scalar contents, whereas `tf.constant` supports
2408//     Tensor values.
2409// *   `tf.fill` creates an Op in the computation graph that constructs the actual
2410//     Tensor value at runtime. This is in contrast to `tf.constant` which embeds
2411//     the entire Tensor into the graph with a `Const` node.
2412// *   Because `tf.fill` evaluates at graph runtime, it supports dynamic shapes
2413//     based on other runtime Tensors, unlike `tf.constant`.
2414//
2415// Arguments:
2416//	dims: 1-D. Represents the shape of the output tensor.
2417//	value: 0-D (scalar). Value to fill the returned tensor.
2418//
2419// @compatibility(numpy)
2420// Equivalent to np.full
2421// @end_compatibility
2422func Fill(scope *Scope, dims tf.Output, value tf.Output) (output tf.Output) {
2423	if scope.Err() != nil {
2424		return
2425	}
2426	opspec := tf.OpSpec{
2427		Type: "Fill",
2428		Input: []tf.Input{
2429			dims, value,
2430		},
2431	}
2432	op := scope.AddOperation(opspec)
2433	return op.Output(0)
2434}
2435
2436// Reverses specific dimensions of a tensor.
2437//
2438// Given a `tensor`, and a `bool` tensor `dims` representing the dimensions
2439// of `tensor`, this operation reverses each dimension i of `tensor` where
2440// `dims[i]` is `True`.
2441//
2442// `tensor` can have up to 8 dimensions. The number of dimensions
2443// of `tensor` must equal the number of elements in `dims`. In other words:
2444//
2445// `rank(tensor) = size(dims)`
2446//
2447// For example:
2448//
2449// ```
2450// # tensor 't' is [[[[ 0,  1,  2,  3],
2451// #                  [ 4,  5,  6,  7],
2452// #                  [ 8,  9, 10, 11]],
2453// #                 [[12, 13, 14, 15],
2454// #                  [16, 17, 18, 19],
2455// #                  [20, 21, 22, 23]]]]
2456// # tensor 't' shape is [1, 2, 3, 4]
2457//
2458// # 'dims' is [False, False, False, True]
2459// reverse(t, dims) ==> [[[[ 3,  2,  1,  0],
2460//                         [ 7,  6,  5,  4],
2461//                         [ 11, 10, 9, 8]],
2462//                        [[15, 14, 13, 12],
2463//                         [19, 18, 17, 16],
2464//                         [23, 22, 21, 20]]]]
2465//
2466// # 'dims' is [False, True, False, False]
2467// reverse(t, dims) ==> [[[[12, 13, 14, 15],
2468//                         [16, 17, 18, 19],
2469//                         [20, 21, 22, 23]
2470//                        [[ 0,  1,  2,  3],
2471//                         [ 4,  5,  6,  7],
2472//                         [ 8,  9, 10, 11]]]]
2473//
2474// # 'dims' is [False, False, True, False]
2475// reverse(t, dims) ==> [[[[8, 9, 10, 11],
2476//                         [4, 5, 6, 7],
2477//                         [0, 1, 2, 3]]
2478//                        [[20, 21, 22, 23],
2479//                         [16, 17, 18, 19],
2480//                         [12, 13, 14, 15]]]]
2481// ```
2482//
2483// Arguments:
2484//	tensor: Up to 8-D.
2485//	dims: 1-D. The dimensions to reverse.
2486//
2487// Returns The same shape as `tensor`.
2488func Reverse(scope *Scope, tensor tf.Output, dims tf.Output) (output tf.Output) {
2489	if scope.Err() != nil {
2490		return
2491	}
2492	opspec := tf.OpSpec{
2493		Type: "Reverse",
2494		Input: []tf.Input{
2495			tensor, dims,
2496		},
2497	}
2498	op := scope.AddOperation(opspec)
2499	return op.Output(0)
2500}
2501
2502// Returns the batched diagonal part of a batched tensor.
2503//
2504// This operation returns a tensor with the `diagonal` part
2505// of the batched `input`. The `diagonal` part is computed as follows:
2506//
2507// Assume `input` has `k` dimensions `[I, J, K, ..., M, N]`, then the output is a
2508// tensor of rank `k - 1` with dimensions `[I, J, K, ..., min(M, N)]` where:
2509//
2510// `diagonal[i, j, k, ..., n] = input[i, j, k, ..., n, n]`.
2511//
2512// The input must be at least a matrix.
2513//
2514// For example:
2515//
2516// ```
2517// # 'input' is [[[1, 0, 0, 0]
2518//                [0, 2, 0, 0]
2519//                [0, 0, 3, 0]
2520//                [0, 0, 0, 4]],
2521//               [[5, 0, 0, 0]
2522//                [0, 6, 0, 0]
2523//                [0, 0, 7, 0]
2524//                [0, 0, 0, 8]]]
2525//
2526// and input.shape = (2, 4, 4)
2527//
2528// tf.matrix_diag_part(input) ==> [[1, 2, 3, 4], [5, 6, 7, 8]]
2529//
2530// which has shape (2, 4)
2531// ```
2532//
2533// Arguments:
2534//	input: Rank `k` tensor where `k >= 2`.
2535//
2536// Returns The extracted diagonal(s) having shape
2537// `diagonal.shape = input.shape[:-2] + [min(input.shape[-2:])]`.
2538func MatrixDiagPart(scope *Scope, input tf.Output) (diagonal tf.Output) {
2539	if scope.Err() != nil {
2540		return
2541	}
2542	opspec := tf.OpSpec{
2543		Type: "MatrixDiagPart",
2544		Input: []tf.Input{
2545			input,
2546		},
2547	}
2548	op := scope.AddOperation(opspec)
2549	return op.Output(0)
2550}
2551
// Returns a batched diagonal tensor with given batched diagonal values.
2553//
2554// Given a `diagonal`, this operation returns a tensor with the `diagonal` and
2555// everything else padded with zeros. The diagonal is computed as follows:
2556//
2557// Assume `diagonal` has `k` dimensions `[I, J, K, ..., N]`, then the output is a
// tensor of rank `k+1` with dimensions `[I, J, K, ..., N, N]` where:
2559//
2560// `output[i, j, k, ..., m, n] = 1{m=n} * diagonal[i, j, k, ..., n]`.
2561//
2562// For example:
2563//
2564// ```
2565// # 'diagonal' is [[1, 2, 3, 4], [5, 6, 7, 8]]
2566//
2567// and diagonal.shape = (2, 4)
2568//
2569// tf.matrix_diag(diagonal) ==> [[[1, 0, 0, 0]
2570//                                      [0, 2, 0, 0]
2571//                                      [0, 0, 3, 0]
2572//                                      [0, 0, 0, 4]],
2573//                                     [[5, 0, 0, 0]
2574//                                      [0, 6, 0, 0]
2575//                                      [0, 0, 7, 0]
2576//                                      [0, 0, 0, 8]]]
2577//
2578// which has shape (2, 4, 4)
2579// ```
2580//
2581// Arguments:
2582//	diagonal: Rank `k`, where `k >= 1`.
2583//
2584// Returns Rank `k+1`, with `output.shape = diagonal.shape + [diagonal.shape[-1]]`.
2585func MatrixDiag(scope *Scope, diagonal tf.Output) (output tf.Output) {
2586	if scope.Err() != nil {
2587		return
2588	}
2589	opspec := tf.OpSpec{
2590		Type: "MatrixDiag",
2591		Input: []tf.Input{
2592			diagonal,
2593		},
2594	}
2595	op := scope.AddOperation(opspec)
2596	return op.Output(0)
2597}
2598
2599// QuantizedInstanceNormAttr is an optional argument to QuantizedInstanceNorm.
2600type QuantizedInstanceNormAttr func(optionalAttr)
2601
2602// QuantizedInstanceNormOutputRangeGiven sets the optional output_range_given attribute to value.
2603//
// value: If True, `given_y_min` and `given_y_max` are used as the output
// range. Otherwise, the implementation computes the output range.
2607// If not specified, defaults to false
2608func QuantizedInstanceNormOutputRangeGiven(value bool) QuantizedInstanceNormAttr {
2609	return func(m optionalAttr) {
2610		m["output_range_given"] = value
2611	}
2612}
2613
2614// QuantizedInstanceNormGivenYMin sets the optional given_y_min attribute to value.
2615//
2616// value: Output in `y_min` if `output_range_given` is True.
2617// If not specified, defaults to 0
2618func QuantizedInstanceNormGivenYMin(value float32) QuantizedInstanceNormAttr {
2619	return func(m optionalAttr) {
2620		m["given_y_min"] = value
2621	}
2622}
2623
2624// QuantizedInstanceNormGivenYMax sets the optional given_y_max attribute to value.
2625//
2626// value: Output in `y_max` if `output_range_given` is True.
2627// If not specified, defaults to 0
2628func QuantizedInstanceNormGivenYMax(value float32) QuantizedInstanceNormAttr {
2629	return func(m optionalAttr) {
2630		m["given_y_max"] = value
2631	}
2632}
2633
2634// QuantizedInstanceNormVarianceEpsilon sets the optional variance_epsilon attribute to value.
2635//
2636// value: A small float number to avoid dividing by 0.
2637// If not specified, defaults to 1e-05
2638func QuantizedInstanceNormVarianceEpsilon(value float32) QuantizedInstanceNormAttr {
2639	return func(m optionalAttr) {
2640		m["variance_epsilon"] = value
2641	}
2642}
2643
2644// QuantizedInstanceNormMinSeparation sets the optional min_separation attribute to value.
2645//
2646// value: Minimum value of `y_max - y_min`
2647// If not specified, defaults to 0.001
2648func QuantizedInstanceNormMinSeparation(value float32) QuantizedInstanceNormAttr {
2649	return func(m optionalAttr) {
2650		m["min_separation"] = value
2651	}
2652}
2653
2654// Quantized Instance normalization.
2655//
2656// Arguments:
2657//	x: A 4D input Tensor.
2658//	x_min: The value represented by the lowest quantized input.
2659//	x_max: The value represented by the highest quantized input.
2660//
// Returns:
//	y: A 4D Tensor.
//	y_min: The value represented by the lowest quantized output.
//	y_max: The value represented by the highest quantized output.
2662func QuantizedInstanceNorm(scope *Scope, x tf.Output, x_min tf.Output, x_max tf.Output, optional ...QuantizedInstanceNormAttr) (y tf.Output, y_min tf.Output, y_max tf.Output) {
2663	if scope.Err() != nil {
2664		return
2665	}
2666	attrs := map[string]interface{}{}
2667	for _, a := range optional {
2668		a(attrs)
2669	}
2670	opspec := tf.OpSpec{
2671		Type: "QuantizedInstanceNorm",
2672		Input: []tf.Input{
2673			x, x_min, x_max,
2674		},
2675		Attrs: attrs,
2676	}
2677	op := scope.AddOperation(opspec)
2678	return op.Output(0), op.Output(1), op.Output(2)
2679}
2680
2681// Returns the diagonal part of the tensor.
2682//
2683// This operation returns a tensor with the `diagonal` part
2684// of the `input`. The `diagonal` part is computed as follows:
2685//
2686// Assume `input` has dimensions `[D1,..., Dk, D1,..., Dk]`, then the output is a
2687// tensor of rank `k` with dimensions `[D1,..., Dk]` where:
2688//
2689// `diagonal[i1,..., ik] = input[i1, ..., ik, i1,..., ik]`.
2690//
2691// For example:
2692//
2693// ```
2694// # 'input' is [[1, 0, 0, 0]
2695//               [0, 2, 0, 0]
2696//               [0, 0, 3, 0]
2697//               [0, 0, 0, 4]]
2698//
2699// tf.diag_part(input) ==> [1, 2, 3, 4]
2700// ```
2701//
2702// Arguments:
2703//	input: Rank k tensor where k is even and not zero.
2704//
2705// Returns The extracted diagonal.
2706func DiagPart(scope *Scope, input tf.Output) (diagonal tf.Output) {
2707	if scope.Err() != nil {
2708		return
2709	}
2710	opspec := tf.OpSpec{
2711		Type: "DiagPart",
2712		Input: []tf.Input{
2713			input,
2714		},
2715	}
2716	op := scope.AddOperation(opspec)
2717	return op.Output(0)
2718}
2719
2720// Gives a guarantee to the TF runtime that the input tensor is a constant.
2721//
2722// The runtime is then free to make optimizations based on this.
2723//
2724// Only accepts value typed tensors as inputs and rejects resource variable handles
2725// as input.
2726//
2727// Returns the input tensor without modification.
2728func GuaranteeConst(scope *Scope, input tf.Output) (output tf.Output) {
2729	if scope.Err() != nil {
2730		return
2731	}
2732	opspec := tf.OpSpec{
2733		Type: "GuaranteeConst",
2734		Input: []tf.Input{
2735			input,
2736		},
2737	}
2738	op := scope.AddOperation(opspec)
2739	return op.Output(0)
2740}
2741
2742// Returns a constant tensor on the host. Only for writing C++ tests.
2743//
2744// Arguments:
2745//	value: Attr `value` is the tensor to return.
2746//
2747func HostConst(scope *Scope, value tf.Tensor, dtype tf.DataType) (output tf.Output) {
2748	if scope.Err() != nil {
2749		return
2750	}
2751	attrs := map[string]interface{}{"value": value, "dtype": dtype}
2752	opspec := tf.OpSpec{
2753		Type: "HostConst",
2754
2755		Attrs: attrs,
2756	}
2757	op := scope.AddOperation(opspec)
2758	return op.Output(0)
2759}
2760
2761// Splits a tensor into `num_split` tensors along one dimension.
2762//
2763// Arguments:
2764//	value: The tensor to split.
2765//	size_splits: list containing the sizes of each output tensor along the split
2766// dimension. Must sum to the dimension of value along split_dim.
2767// Can contain one -1 indicating that dimension is to be inferred.
2768//	axis: 0-D.  The dimension along which to split.  Must be in the range
2769// `[-rank(value), rank(value))`.
2770//
2771//
2772// Returns Tensors whose shape matches that of `value`
2773// except along `axis`, where their sizes are
2774// `size_splits[i]`.
2775func SplitV(scope *Scope, value tf.Output, size_splits tf.Output, axis tf.Output, num_split int64) (output []tf.Output) {
2776	if scope.Err() != nil {
2777		return
2778	}
2779	attrs := map[string]interface{}{"num_split": num_split}
2780	opspec := tf.OpSpec{
2781		Type: "SplitV",
2782		Input: []tf.Input{
2783			value, size_splits, axis,
2784		},
2785		Attrs: attrs,
2786	}
2787	op := scope.AddOperation(opspec)
2788	if scope.Err() != nil {
2789		return
2790	}
2791	var idx int
2792	var err error
2793	if output, idx, err = makeOutputList(op, idx, "output"); err != nil {
2794		scope.UpdateErr("SplitV", err)
2795		return
2796	}
2797	return output
2798}
2799
2800// Splits a tensor into `num_split` tensors along one dimension.
2801//
2802// Arguments:
2803//	axis: 0-D.  The dimension along which to split.  Must be in the range
2804// `[-rank(value), rank(value))`.
2805//	value: The tensor to split.
2806//	num_split: The number of ways to split.  Must evenly divide
2807// `value.shape[split_dim]`.
2808//
// Returns `num_split` identically shaped tensors, whose shape matches that of `value`
// except along `axis`, where their size is
// `value.shape[axis] / num_split`.
2812func Split(scope *Scope, axis tf.Output, value tf.Output, num_split int64) (output []tf.Output) {
2813	if scope.Err() != nil {
2814		return
2815	}
2816	attrs := map[string]interface{}{"num_split": num_split}
2817	opspec := tf.OpSpec{
2818		Type: "Split",
2819		Input: []tf.Input{
2820			axis, value,
2821		},
2822		Attrs: attrs,
2823	}
2824	op := scope.AddOperation(opspec)
2825	if scope.Err() != nil {
2826		return
2827	}
2828	var idx int
2829	var err error
2830	if output, idx, err = makeOutputList(op, idx, "output"); err != nil {
2831		scope.UpdateErr("Split", err)
2832		return
2833	}
2834	return output
2835}
2836
2837// Concatenates tensors along one dimension.
2838//
2839// Arguments:
2840//	concat_dim: 0-D.  The dimension along which to concatenate.  Must be in the
2841// range [0, rank(values)).
2842//	values: The `N` Tensors to concatenate. Their ranks and types must match,
2843// and their sizes must match in all dimensions except `concat_dim`.
2844//
2845// Returns A `Tensor` with the concatenation of values stacked along the
2846// `concat_dim` dimension.  This tensor's shape matches that of `values` except
2847// in `concat_dim` where it has the sum of the sizes.
2848func Concat(scope *Scope, concat_dim tf.Output, values []tf.Output) (output tf.Output) {
2849	if scope.Err() != nil {
2850		return
2851	}
2852	opspec := tf.OpSpec{
2853		Type: "Concat",
2854		Input: []tf.Input{
2855			concat_dim, tf.OutputList(values),
2856		},
2857	}
2858	op := scope.AddOperation(opspec)
2859	return op.Output(0)
2860}
2861
// Broadcast an array to a compatible shape.
2863//
// Broadcasting is the process of making arrays have compatible shapes
// for arithmetic operations. Two shapes are compatible if for each
// dimension pair they are either equal or one of them is one. When trying
// to broadcast a Tensor to a shape, it starts with the trailing dimensions
// and works its way forward.
2869//
2870// For example,
2871// ```
2872// >>> x = tf.constant([1, 2, 3])
2873// >>> y = tf.broadcast_to(x, [3, 3])
2874// >>> sess.run(y)
2875// array([[1, 2, 3],
2876//        [1, 2, 3],
2877//        [1, 2, 3]], dtype=int32)
2878// ```
// In the above example, the input Tensor of shape `[3]` is broadcast to an
// output Tensor of shape `[3, 3]`.
2881//
2882// Arguments:
2883//	input: A Tensor to broadcast.
//	shape: A 1-D `int` Tensor. The shape of the desired output.
2885//
2886// Returns A Tensor.
2887func BroadcastTo(scope *Scope, input tf.Output, shape tf.Output) (output tf.Output) {
2888	if scope.Err() != nil {
2889		return
2890	}
2891	opspec := tf.OpSpec{
2892		Type: "BroadcastTo",
2893		Input: []tf.Input{
2894			input, shape,
2895		},
2896	}
2897	op := scope.AddOperation(opspec)
2898	return op.Output(0)
2899}
2900
// Converts a flat index or array of flat indices into a tuple of coordinate arrays.
2904//
2905// @compatibility(numpy)
2906// Equivalent to np.unravel_index
2907// @end_compatibility
2908//
2909// Arguments:
//	indices: A 0-D or 1-D `int` Tensor whose elements are indices into the
// flattened version of an array of dimensions dims.
//	dims: A 1-D `int` Tensor. The shape of the array to use for unraveling
// indices.
//
// Returns A 2-D (or 1-D if indices is 0-D) tensor where each row has the
// same shape as the indices array.
2917func UnravelIndex(scope *Scope, indices tf.Output, dims tf.Output) (output tf.Output) {
2918	if scope.Err() != nil {
2919		return
2920	}
2921	opspec := tf.OpSpec{
2922		Type: "UnravelIndex",
2923		Input: []tf.Input{
2924			indices, dims,
2925		},
2926	}
2927	op := scope.AddOperation(opspec)
2928	return op.Output(0)
2929}
2930
// Subtracts `v` from specified rows of `x`.
//
// Computes `y = x; y[i, :] -= v; return y`.
2934//
2935// Arguments:
2936//	x: A `Tensor` of type T.
2937//	i: A vector. Indices into the left-most dimension of `x`.
2938//	v: A `Tensor` of type T. Same dimension sizes as x except the first dimension, which must be the same as i's size.
2939//
2940// Returns A `Tensor` of type T. An alias of `x`. The content of `y` is undefined if there are duplicates in `i`.
2941func InplaceSub(scope *Scope, x tf.Output, i tf.Output, v tf.Output) (y tf.Output) {
2942	if scope.Err() != nil {
2943		return
2944	}
2945	opspec := tf.OpSpec{
2946		Type: "InplaceSub",
2947		Input: []tf.Input{
2948			x, i, v,
2949		},
2950	}
2951	op := scope.AddOperation(opspec)
2952	return op.Output(0)
2953}
2954
2955// Makes a copy of `x`.
2956//
2957// Arguments:
2958//	x: The source tensor of type `T`.
2959//
// Returns A `Tensor` of type `T`. A copy of `x`. Guaranteed that `y`
// is not an alias of `x`.
2962func DeepCopy(scope *Scope, x tf.Output) (y tf.Output) {
2963	if scope.Err() != nil {
2964		return
2965	}
2966	opspec := tf.OpSpec{
2967		Type: "DeepCopy",
2968		Input: []tf.Input{
2969			x,
2970		},
2971	}
2972	op := scope.AddOperation(opspec)
2973	return op.Output(0)
2974}
2975
2976// PackAttr is an optional argument to Pack.
2977type PackAttr func(optionalAttr)
2978
2979// PackAxis sets the optional axis attribute to value.
2980//
2981// value: Dimension along which to pack.  Negative values wrap around, so the
2982// valid range is `[-(R+1), R+1)`.
2983// If not specified, defaults to 0
2984func PackAxis(value int64) PackAttr {
2985	return func(m optionalAttr) {
2986		m["axis"] = value
2987	}
2988}
2989
2990// Packs a list of `N` rank-`R` tensors into one rank-`(R+1)` tensor.
2991//
2992// Packs the `N` tensors in `values` into a tensor with rank one higher than each
2993// tensor in `values`, by packing them along the `axis` dimension.
2994// Given a list of tensors of shape `(A, B, C)`;
2995//
2996// if `axis == 0` then the `output` tensor will have the shape `(N, A, B, C)`.
2997// if `axis == 1` then the `output` tensor will have the shape `(A, N, B, C)`.
2998// Etc.
2999//
3000// For example:
3001//
3002// ```
3003// # 'x' is [1, 4]
3004// # 'y' is [2, 5]
3005// # 'z' is [3, 6]
3006// pack([x, y, z]) => [[1, 4], [2, 5], [3, 6]]  # Pack along first dim.
3007// pack([x, y, z], axis=1) => [[1, 2, 3], [4, 5, 6]]
3008// ```
3009//
3010// This is the opposite of `unpack`.
3011//
3012// Arguments:
3013//	values: Must be of same shape and type.
3014//
3015// Returns The packed tensor.
3016func Pack(scope *Scope, values []tf.Output, optional ...PackAttr) (output tf.Output) {
3017	if scope.Err() != nil {
3018		return
3019	}
3020	attrs := map[string]interface{}{}
3021	for _, a := range optional {
3022		a(attrs)
3023	}
3024	opspec := tf.OpSpec{
3025		Type: "Pack",
3026		Input: []tf.Input{
3027			tf.OutputList(values),
3028		},
3029		Attrs: attrs,
3030	}
3031	op := scope.AddOperation(opspec)
3032	return op.Output(0)
3033}
3034
3035// Concatenates a list of `N` tensors along the first dimension.
3036//
3037// The input tensors are all required to have size 1 in the first dimension.
3038//
3039// For example:
3040//
3041// ```
3042// # 'x' is [[1, 4]]
3043// # 'y' is [[2, 5]]
3044// # 'z' is [[3, 6]]
3045// parallel_concat([x, y, z]) => [[1, 4], [2, 5], [3, 6]]  # Pack along first dim.
3046// ```
3047//
// The difference between concat and parallel_concat is that concat requires all
// of the inputs to be computed before the operation will begin, but doesn't require
// that the input shapes be known during graph construction.  Parallel concat
// will copy pieces of the input into the output as they become available; in
// some situations this can provide a performance benefit.
3053//
3054// Arguments:
3055//	values: Tensors to be concatenated. All must have size 1 in the first dimension
3056// and same shape.
3057//	shape: the final shape of the result; should be equal to the shapes of any input
3058// but with the number of input values in the first dimension.
3059//
3060// Returns The concatenated tensor.
3061func ParallelConcat(scope *Scope, values []tf.Output, shape tf.Shape) (output tf.Output) {
3062	if scope.Err() != nil {
3063		return
3064	}
3065	attrs := map[string]interface{}{"shape": shape}
3066	opspec := tf.OpSpec{
3067		Type: "ParallelConcat",
3068		Input: []tf.Input{
3069			tf.OutputList(values),
3070		},
3071		Attrs: attrs,
3072	}
3073	op := scope.AddOperation(opspec)
3074	return op.Output(0)
3075}
3076
3077// DecodeWavAttr is an optional argument to DecodeWav.
3078type DecodeWavAttr func(optionalAttr)
3079
3080// DecodeWavDesiredChannels sets the optional desired_channels attribute to value.
3081//
3082// value: Number of sample channels wanted.
3083// If not specified, defaults to -1
3084func DecodeWavDesiredChannels(value int64) DecodeWavAttr {
3085	return func(m optionalAttr) {
3086		m["desired_channels"] = value
3087	}
3088}
3089
3090// DecodeWavDesiredSamples sets the optional desired_samples attribute to value.
3091//
3092// value: Length of audio requested.
3093// If not specified, defaults to -1
3094func DecodeWavDesiredSamples(value int64) DecodeWavAttr {
3095	return func(m optionalAttr) {
3096		m["desired_samples"] = value
3097	}
3098}
3099
3100// Decode a 16-bit PCM WAV file to a float tensor.
3101//
3102// The -32768 to 32767 signed 16-bit values will be scaled to -1.0 to 1.0 in float.
3103//
3104// When desired_channels is set, if the input contains fewer channels than this
3105// then the last channel will be duplicated to give the requested number, else if
3106// the input has more channels than requested then the additional channels will be
3107// ignored.
3108//
3109// If desired_samples is set, then the audio will be cropped or padded with zeroes
3110// to the requested length.
3111//
// The first output contains a Tensor with the content of the audio samples. The
// first dimension will be the number of samples, and the second will be the
// number of channels. For example, a ten-sample-long stereo WAV file should give
// an output shape of [10, 2].
3116//
3117// Arguments:
3118//	contents: The WAV-encoded audio, usually from a file.
3119//
// Returns:
//	audio: 2-D with shape `[length, channels]`.
//	sample_rate: Scalar holding the sample rate found in the WAV header.
3121func DecodeWav(scope *Scope, contents tf.Output, optional ...DecodeWavAttr) (audio tf.Output, sample_rate tf.Output) {
3122	if scope.Err() != nil {
3123		return
3124	}
3125	attrs := map[string]interface{}{}
3126	for _, a := range optional {
3127		a(attrs)
3128	}
3129	opspec := tf.OpSpec{
3130		Type: "DecodeWav",
3131		Input: []tf.Input{
3132			contents,
3133		},
3134		Attrs: attrs,
3135	}
3136	op := scope.AddOperation(opspec)
3137	return op.Output(0), op.Output(1)
3138}

// UnbatchAttr is an optional argument to Unbatch.
type UnbatchAttr func(optionalAttr)

// UnbatchContainer sets the optional container attribute to value.
// If not specified, defaults to ""
func UnbatchContainer(value string) UnbatchAttr {
	return func(m optionalAttr) {
		m["container"] = value
	}
}

// UnbatchSharedName sets the optional shared_name attribute to value.
// If not specified, defaults to ""
func UnbatchSharedName(value string) UnbatchAttr {
	return func(m optionalAttr) {
		m["shared_name"] = value
	}
}

// Reverses the operation of Batch for a single output Tensor.
//
// An instance of Unbatch either receives an empty batched_tensor, in which case it
// asynchronously waits until the values become available from a concurrently
// running instance of Unbatch with the same container and shared_name, or receives
// a non-empty batched_tensor in which case it finalizes all other concurrently
// running instances and outputs its own element from the batch.
//
// batched_tensor: The possibly transformed output of Batch. The size of the first
//  dimension should remain unchanged by the transformations for the operation to
//  work.
// batch_index: The matching batch_index obtained from Batch.
// id: The id scalar emitted by Batch.
// unbatched_tensor: The Tensor corresponding to this execution.
// timeout_micros: Maximum amount of time (in microseconds) to wait to receive the
//  batched input tensor associated with a given invocation of the op.
// container: Container to control resource sharing.
// shared_name: Instances of Unbatch with the same container and shared_name are
//  assumed to possibly belong to the same batch. If left empty, the op name will
//  be used as the shared name.
func Unbatch(scope *Scope, batched_tensor tf.Output, batch_index tf.Output, id tf.Output, timeout_micros int64, optional ...UnbatchAttr) (unbatched_tensor tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"timeout_micros": timeout_micros}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "Unbatch",
		Input: []tf.Input{
			batched_tensor, batch_index, id,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
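
// Illustrative sketch, not generated code: Unbatch instances cooperate through
// the container/shared_name pair described above. The three inputs are assumed
// to come from a matching Batch op elsewhere in the graph.
func exampleUnbatch(scope *Scope, batchedTensor, batchIndex, id tf.Output) tf.Output {
	// Instances created with the same shared_name participate in the same
	// rendezvous; wait up to one second for the batched tensor to arrive.
	return Unbatch(scope, batchedTensor, batchIndex, id, 1000000,
		UnbatchSharedName("shared_batch"),
	)
}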

// Computes the bitwise left-shift of `x` and `y`, element-wise.
//
// If `y` is negative, or greater than or equal to the width of `x` in bits, the
// result is implementation defined.
func LeftShift(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "LeftShift",
		Input: []tf.Input{
			x, y,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Computes the bitwise XOR of `x` and `y`, element-wise.
//
// The result has a bit set in each position where the corresponding bits of
// `x` and `y` differ. The computation is performed on the underlying
// representations of `x` and `y`.
func BitwiseXor(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "BitwiseXor",
		Input: []tf.Input{
			x, y,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Computes element-wise population count (a.k.a. popcount, bitsum, bitcount).
//
// For each entry in `x`, calculates the number of `1` (on) bits in the binary
// representation of that entry.
//
// **NOTE**: It is more efficient to first `tf.bitcast` your tensors into
// `int32` or `int64` and perform the bitcount on the result, than to feed in
// 8- or 16-bit inputs and then aggregate the resulting counts.
func PopulationCount(scope *Scope, x tf.Output) (y tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "PopulationCount",
		Input: []tf.Input{
			x,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
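
// Illustrative sketch, not generated code, of the NOTE above: reinterpret four
// uint8 lanes as one int32 word before counting bits. It assumes the generated
// Bitcast wrapper from elsewhere in this package, and that the innermost
// dimension of x8 has size 4, which Bitcast requires when widening the type.
func examplePopulationCount(scope *Scope, x8 tf.Output) tf.Output {
	// Reinterpreting the bytes leaves the bit pattern unchanged, so the
	// total popcount is preserved while touching fewer elements.
	words := Bitcast(scope, x8, tf.Int32)
	// One count per 32-bit word instead of one per byte.
	return PopulationCount(scope, words)
}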

// Bucketize each feature based on bucket boundaries.
//
// An op that returns a list of integer tensors, where each tensor represents the
// bucketized values for a single feature.
//
// Arguments:
//	float_values: float; List of Rank 1 Tensors, each containing float values for a single feature.
//	bucket_boundaries: float; List of Rank 1 Tensors, each containing the bucket boundaries for a single
// feature.
//
// Returns int; List of Rank 1 Tensors, each containing the bucketized values for a single feature.
func BoostedTreesBucketize(scope *Scope, float_values []tf.Output, bucket_boundaries []tf.Output) (buckets []tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "BoostedTreesBucketize",
		Input: []tf.Input{
			tf.OutputList(float_values), tf.OutputList(bucket_boundaries),
		},
	}
	op := scope.AddOperation(opspec)
	if scope.Err() != nil {
		return
	}
	var idx int
	var err error
	if buckets, idx, err = makeOutputList(op, idx, "buckets"); err != nil {
		scope.UpdateErr("BoostedTreesBucketize", err)
		return
	}
	return buckets
}

// BoostedTreesQuantileStreamResourceFlushAttr is an optional argument to BoostedTreesQuantileStreamResourceFlush.
type BoostedTreesQuantileStreamResourceFlushAttr func(optionalAttr)

// BoostedTreesQuantileStreamResourceFlushGenerateQuantiles sets the optional generate_quantiles attribute to value.
//
// value: bool; If True, the output will be the num_quantiles for each stream, where the ith
// entry is the ith quantile of the input with an approximation error of epsilon.
// Duplicate values may be present.
// If False, the output will be the points in the histogram that were collected,
// which roughly translates to 1/epsilon boundaries, without any duplicates.
// If not specified, defaults to false
func BoostedTreesQuantileStreamResourceFlushGenerateQuantiles(value bool) BoostedTreesQuantileStreamResourceFlushAttr {
	return func(m optionalAttr) {
		m["generate_quantiles"] = value
	}
}

// Flush the summaries for a quantile stream resource.
//
// An op that flushes the summaries for a quantile stream resource.
//
// Arguments:
//	quantile_stream_resource_handle: resource handle referring to a QuantileStreamResource.
//	num_buckets: int; approximate number of buckets unless using generate_quantiles.
//
// Returns the created operation.
func BoostedTreesQuantileStreamResourceFlush(scope *Scope, quantile_stream_resource_handle tf.Output, num_buckets tf.Output, optional ...BoostedTreesQuantileStreamResourceFlushAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "BoostedTreesQuantileStreamResourceFlush",
		Input: []tf.Input{
			quantile_stream_resource_handle, num_buckets,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}

// Makes the summary of quantiles for the batch.
//
// An op that takes a list of tensors (one tensor per feature) and outputs the
// quantile summaries for each tensor.
//
// Arguments:
//	float_values: float; List of Rank 1 Tensors each containing values for a single feature.
//	example_weights: float; Rank 1 Tensor with weights per instance.
//	epsilon: float; The required maximum approximation error.
//
// Returns float; List of Rank 2 Tensors each containing the quantile summary
// (value, weight, min_rank, max_rank) of a single feature.
func BoostedTreesMakeQuantileSummaries(scope *Scope, float_values []tf.Output, example_weights tf.Output, epsilon tf.Output) (summaries []tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "BoostedTreesMakeQuantileSummaries",
		Input: []tf.Input{
			tf.OutputList(float_values), example_weights, epsilon,
		},
	}
	op := scope.AddOperation(opspec)
	if scope.Err() != nil {
		return
	}
	var idx int
	var err error
	if summaries, idx, err = makeOutputList(op, idx, "summaries"); err != nil {
		scope.UpdateErr("BoostedTreesMakeQuantileSummaries", err)
		return
	}
	return summaries
}

// BoostedTreesCreateQuantileStreamResourceAttr is an optional argument to BoostedTreesCreateQuantileStreamResource.
type BoostedTreesCreateQuantileStreamResourceAttr func(optionalAttr)

// BoostedTreesCreateQuantileStreamResourceMaxElements sets the optional max_elements attribute to value.
//
// value: int; The maximum number of data points that can be fed to the stream.
// If not specified, defaults to 1099511627776
func BoostedTreesCreateQuantileStreamResourceMaxElements(value int64) BoostedTreesCreateQuantileStreamResourceAttr {
	return func(m optionalAttr) {
		m["max_elements"] = value
	}
}

// Create the Resource for Quantile Streams.
//
// Arguments:
//	quantile_stream_resource_handle: resource; Handle to quantile stream resource.
//	epsilon: float; The required approximation error of the stream resource.
//	num_streams: int; The number of streams managed by the resource that share the same epsilon.
//
// Returns the created operation.
func BoostedTreesCreateQuantileStreamResource(scope *Scope, quantile_stream_resource_handle tf.Output, epsilon tf.Output, num_streams tf.Output, optional ...BoostedTreesCreateQuantileStreamResourceAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "BoostedTreesCreateQuantileStreamResource",
		Input: []tf.Input{
			quantile_stream_resource_handle, epsilon, num_streams,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}
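
// Illustrative sketch, not generated code: the intended order of the quantile
// stream ops above. It assumes the Const wrapper from this package, and that a
// separate generated wrapper (e.g. BoostedTreesQuantileStreamResourceAddSummaries)
// feeds the summaries into the stream between steps 2 and 3. A real graph would
// also add control dependencies to enforce this ordering at run time.
func exampleQuantilePipeline(scope *Scope, handle tf.Output, features []tf.Output, weights tf.Output) (*tf.Operation, []tf.Output) {
	eps := Const(scope, float32(0.01))
	numStreams := Const(scope, int64(len(features)))
	// 1. Create the stream resource with the target approximation error.
	BoostedTreesCreateQuantileStreamResource(scope, handle, eps, numStreams)
	// 2. Summarize the current batch of feature values.
	summaries := BoostedTreesMakeQuantileSummaries(scope, features, weights, eps)
	// 3. Flush the accumulated summaries into bucket boundaries.
	numBuckets := Const(scope, int64(100))
	flush := BoostedTreesQuantileStreamResourceFlush(scope, handle, numBuckets)
	return flush, summaries
}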

// Checks whether a quantile stream has been initialized.
//
// An Op that checks if the quantile stream resource is initialized.
//
// Arguments:
//	quantile_stream_resource_handle: resource; The reference to quantile stream resource handle.
//
// Returns bool; True if the resource is initialized, False otherwise.
func IsBoostedTreesQuantileStreamResourceInitialized(scope *Scope, quantile_stream_resource_handle tf.Output) (is_initialized tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "IsBoostedTreesQuantileStreamResourceInitialized",
		Input: []tf.Input{
			quantile_stream_resource_handle,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Calculates the prior from the training data (the bias) and fills in the first node with the logits' prior. Returns a boolean indicating whether to continue centering.
//
// Arguments:
//	tree_ensemble_handle: Handle to the tree ensemble.
//	mean_gradients: A tensor with shape=[logits_dimension] with mean of gradients for a first node.
//	mean_hessians: A tensor with shape=[logits_dimension] with mean of hessians for a first node.
//	l1: l1 regularization factor on leaf weights, per instance based.
//	l2: l2 regularization factor on leaf weights, per instance based.
//
// Returns Bool, whether to continue bias centering.
func BoostedTreesCenterBias(scope *Scope, tree_ensemble_handle tf.Output, mean_gradients tf.Output, mean_hessians tf.Output, l1 tf.Output, l2 tf.Output) (continue_centering tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "BoostedTreesCenterBias",
		Input: []tf.Input{
			tree_ensemble_handle, mean_gradients, mean_hessians, l1, l2,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Runs multiple additive regression ensemble predictors on input instances and
//
// computes the update to cached logits. It is designed to be used during training.
// It traverses the trees starting from cached tree id and cached node id and
// calculates the updates to be pushed to the cache.
//
// Arguments:
//
//	cached_tree_ids: Rank 1 Tensor containing cached tree ids which is the starting
// tree of prediction.
//	cached_node_ids: Rank 1 Tensor containing cached node id which is the starting
// node of prediction.
//	bucketized_features: A list of rank 1 Tensors containing bucket id for each
// feature.
//	logits_dimension: scalar, dimension of the logits, to be used for partial logits
// shape.
//
// Returns:
//	partial_logits: Rank 2 Tensor containing logits update (with respect to cached
// values stored) for each example.
//	tree_ids: Rank 1 Tensor containing new tree ids for each example.
//	node_ids: Rank 1 Tensor containing new node ids in the new tree_ids.
func BoostedTreesTrainingPredict(scope *Scope, tree_ensemble_handle tf.Output, cached_tree_ids tf.Output, cached_node_ids tf.Output, bucketized_features []tf.Output, logits_dimension int64) (partial_logits tf.Output, tree_ids tf.Output, node_ids tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"logits_dimension": logits_dimension}
	opspec := tf.OpSpec{
		Type: "BoostedTreesTrainingPredict",
		Input: []tf.Input{
			tree_ensemble_handle, cached_tree_ids, cached_node_ids, tf.OutputList(bucketized_features),
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}
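
// Illustrative sketch, not generated code: wiring BoostedTreesTrainingPredict
// into a training step. The cached ids would normally be read back from the
// previous iteration; here they are taken as inputs to keep the example small.
func exampleTrainingPredict(scope *Scope, ensemble, cachedTreeIds, cachedNodeIds tf.Output, features []tf.Output) (tf.Output, tf.Output, tf.Output) {
	// logits_dimension 1 covers single-logit (e.g. binary) models.
	return BoostedTreesTrainingPredict(scope, ensemble, cachedTreeIds, cachedNodeIds, features, 1)
}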

// Serializes the tree ensemble to a proto.
//
// Arguments:
//	tree_ensemble_handle: Handle to the tree ensemble.
//
// Returns:
//	stamp_token: Stamp token of the tree ensemble resource.
//	tree_ensemble_serialized: Serialized proto of the ensemble.
func BoostedTreesSerializeEnsemble(scope *Scope, tree_ensemble_handle tf.Output) (stamp_token tf.Output, tree_ensemble_serialized tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "BoostedTreesSerializeEnsemble",
		Input: []tf.Input{
			tree_ensemble_handle,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1)
}

// Debugging/model interpretability outputs for each example.
//
// It traverses all the trees and computes debug metrics for individual examples,
// such as getting split feature ids and logits after each split along the decision
// path used to compute directional feature contributions.
//
// Arguments:
//
//	bucketized_features: A list of rank 1 Tensors containing bucket id for each
// feature.
//	logits_dimension: scalar, dimension of the logits, to be used for constructing the protos in
// examples_debug_outputs_serialized.
//
// Returns Output rank 1 Tensor containing a proto serialized as a string for each example.
func BoostedTreesExampleDebugOutputs(scope *Scope, tree_ensemble_handle tf.Output, bucketized_features []tf.Output, logits_dimension int64) (examples_debug_outputs_serialized tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"logits_dimension": logits_dimension}
	opspec := tf.OpSpec{
		Type: "BoostedTreesExampleDebugOutputs",
		Input: []tf.Input{
			tree_ensemble_handle, tf.OutputList(bucketized_features),
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Makes the summary of accumulated stats for the batch.
//
// The summary stats contain gradients and hessians accumulated into the corresponding node and bucket for each example.
//
// Arguments:
//	node_ids: int32 Rank 1 Tensor containing node ids, which each example falls into for the requested layer.
//	gradients: float32; Rank 2 Tensor (shape=[#examples, 1]) for gradients.
//	hessians: float32; Rank 2 Tensor (shape=[#examples, 1]) for hessians.
//	bucketized_features_list: int32 list of Rank 1 Tensors, each containing the bucketized feature (for each feature column).
//	max_splits: int; the maximum number of splits possible in the whole tree.
//	num_buckets: int; equal to the maximum possible value of the bucketized feature.
//
// Returns output Rank 4 Tensor (shape=[#features, #splits, #buckets, 2]) containing accumulated stats put into the corresponding node and bucket. The first index of the 4th dimension refers to gradients, and the second to hessians.
func BoostedTreesMakeStatsSummary(scope *Scope, node_ids tf.Output, gradients tf.Output, hessians tf.Output, bucketized_features_list []tf.Output, max_splits int64, num_buckets int64) (stats_summary tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"max_splits": max_splits, "num_buckets": num_buckets}
	opspec := tf.OpSpec{
		Type: "BoostedTreesMakeStatsSummary",
		Input: []tf.Input{
			node_ids, gradients, hessians, tf.OutputList(bucketized_features_list),
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Creates a tree ensemble model and returns a handle to it.
//
// Arguments:
//	tree_ensemble_handle: Handle to the tree ensemble resource to be created.
//	stamp_token: Token to use as the initial value of the resource stamp.
//	tree_ensemble_serialized: Serialized proto of the tree ensemble.
//
// Returns the created operation.
func BoostedTreesCreateEnsemble(scope *Scope, tree_ensemble_handle tf.Output, stamp_token tf.Output, tree_ensemble_serialized tf.Output) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "BoostedTreesCreateEnsemble",
		Input: []tf.Input{
			tree_ensemble_handle, stamp_token, tree_ensemble_serialized,
		},
	}
	return scope.AddOperation(opspec)
}

// Checks whether a tree ensemble has been initialized.
//
// Arguments:
//	tree_ensemble_handle: Handle to the tree ensemble resource.
//
// Returns output boolean on whether it is initialized or not.
func IsBoostedTreesEnsembleInitialized(scope *Scope, tree_ensemble_handle tf.Output) (is_initialized tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "IsBoostedTreesEnsembleInitialized",
		Input: []tf.Input{
			tree_ensemble_handle,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// BoostedTreesEnsembleResourceHandleOpAttr is an optional argument to BoostedTreesEnsembleResourceHandleOp.
type BoostedTreesEnsembleResourceHandleOpAttr func(optionalAttr)

// BoostedTreesEnsembleResourceHandleOpContainer sets the optional container attribute to value.
// If not specified, defaults to ""
func BoostedTreesEnsembleResourceHandleOpContainer(value string) BoostedTreesEnsembleResourceHandleOpAttr {
	return func(m optionalAttr) {
		m["container"] = value
	}
}

// BoostedTreesEnsembleResourceHandleOpSharedName sets the optional shared_name attribute to value.
// If not specified, defaults to ""
func BoostedTreesEnsembleResourceHandleOpSharedName(value string) BoostedTreesEnsembleResourceHandleOpAttr {
	return func(m optionalAttr) {
		m["shared_name"] = value
	}
}

// Creates a handle to a BoostedTreesEnsembleResource
func BoostedTreesEnsembleResourceHandleOp(scope *Scope, optional ...BoostedTreesEnsembleResourceHandleOpAttr) (resource tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "BoostedTreesEnsembleResourceHandleOp",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
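
// Illustrative sketch, not generated code: the ensemble resource lifecycle
// using the wrappers above. It assumes the Const wrapper from this package;
// stamp token 0 and an empty proto create a fresh ensemble.
func exampleEnsembleLifecycle(scope *Scope) (tf.Output, tf.Output) {
	handle := BoostedTreesEnsembleResourceHandleOp(scope,
		BoostedTreesEnsembleResourceHandleOpSharedName("my_ensemble"),
	)
	stamp := Const(scope, int64(0))
	proto := Const(scope, "")
	// Creating the ensemble is a stateful op with no tensor outputs.
	BoostedTreesCreateEnsemble(scope, handle, stamp, proto)
	// Serialization returns the stamp token and the proto for checkpointing.
	return BoostedTreesSerializeEnsemble(scope, handle)
}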

// Outputs the logits for the given input data.
//
// Arguments:
//	tree_handle: Handle to the tree resource.
//	dense_features: Rank 2 dense features tensor.
//	logits_dimension: Scalar, dimension of the logits.
//
// Returns The logits predictions from the tree for each instance in the batch.
func TensorForestTreePredict(scope *Scope, tree_handle tf.Output, dense_features tf.Output, logits_dimension int64) (logits tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"logits_dimension": logits_dimension}
	opspec := tf.OpSpec{
		Type: "TensorForestTreePredict",
		Input: []tf.Input{
			tree_handle, dense_features,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Gets the number of nodes in a tree.
//
// Arguments:
//	tree_handle: Handle to the tree resource.
//
// Returns The size of the tree.
func TensorForestTreeSize(scope *Scope, tree_handle tf.Output) (tree_size tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "TensorForestTreeSize",
		Input: []tf.Input{
			tree_handle,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Creates a tree resource and returns a handle to it.
//
// Arguments:
//	tree_handle: Handle to the tree resource to be created.
//	tree_config: Serialized proto string of the boosted_trees.Tree.
//
// Returns the created operation.
func TensorForestCreateTreeVariable(scope *Scope, tree_handle tf.Output, tree_config tf.Output) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "TensorForestCreateTreeVariable",
		Input: []tf.Input{
			tree_handle, tree_config,
		},
	}
	return scope.AddOperation(opspec)
}
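
// Illustrative sketch, not generated code: creating a tree resource from a
// serialized config, then querying its size and predictions. The tree handle
// is assumed to come from the corresponding resource-handle op elsewhere in
// this package, and Const is the generated constant wrapper.
func exampleTensorForest(scope *Scope, treeHandle, denseFeatures tf.Output, serializedTree string) (tf.Output, tf.Output) {
	config := Const(scope, serializedTree)
	TensorForestCreateTreeVariable(scope, treeHandle, config)
	size := TensorForestTreeSize(scope, treeHandle)
	logits := TensorForestTreePredict(scope, treeHandle, denseFeatures, 1)
	return size, logits
}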

// ComputeAccidentalHitsAttr is an optional argument to ComputeAccidentalHits.
type ComputeAccidentalHitsAttr func(optionalAttr)

// ComputeAccidentalHitsSeed sets the optional seed attribute to value.
//
// value: If either seed or seed2 are set to be non-zero, the random number
// generator is seeded by the given seed.  Otherwise, it is seeded by a
// random seed.
// If not specified, defaults to 0
func ComputeAccidentalHitsSeed(value int64) ComputeAccidentalHitsAttr {
	return func(m optionalAttr) {
		m["seed"] = value
	}
}

// ComputeAccidentalHitsSeed2 sets the optional seed2 attribute to value.
//
// value: A second seed to avoid seed collision.
// If not specified, defaults to 0
func ComputeAccidentalHitsSeed2(value int64) ComputeAccidentalHitsAttr {
	return func(m optionalAttr) {
		m["seed2"] = value
	}
}

// Computes the ids of the positions in sampled_candidates that match true_labels.
//
// When doing log-odds NCE, the result of this op should be passed through a
// SparseToDense op, then added to the logits of the sampled candidates. This has
// the effect of 'removing' the sampled labels that match the true labels by
// making the classifier sure that they are sampled labels.
//
// Arguments:
//	true_classes: The true_classes output of UnpackSparseLabels.
//	sampled_candidates: The sampled_candidates output of CandidateSampler.
//	num_true: Number of true labels per context.
//
// Returns:
//	indices: A vector of indices corresponding to rows of true_candidates.
//	ids: A vector of IDs of positions in sampled_candidates that match a true_label
// for the row with the corresponding index in indices.
//	weights: A vector of the same length as indices and ids, in which each element
// is -FLOAT_MAX.
func ComputeAccidentalHits(scope *Scope, true_classes tf.Output, sampled_candidates tf.Output, num_true int64, optional ...ComputeAccidentalHitsAttr) (indices tf.Output, ids tf.Output, weights tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_true": num_true}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ComputeAccidentalHits",
		Input: []tf.Input{
			true_classes, sampled_candidates,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}
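
// Illustrative sketch, not generated code: a deterministic accidental-hit
// computation with both seeds pinned, for one true label per example.
func exampleAccidentalHits(scope *Scope, trueClasses, sampled tf.Output) (tf.Output, tf.Output, tf.Output) {
	return ComputeAccidentalHits(scope, trueClasses, sampled, 1,
		ComputeAccidentalHitsSeed(42),
		ComputeAccidentalHitsSeed2(7),
	)
}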

// FixedUnigramCandidateSamplerAttr is an optional argument to FixedUnigramCandidateSampler.
type FixedUnigramCandidateSamplerAttr func(optionalAttr)

// FixedUnigramCandidateSamplerVocabFile sets the optional vocab_file attribute to value.
//
// value: Each valid line in this file (which should have a CSV-like format)
// corresponds to a valid word ID. IDs are in sequential order, starting from
// num_reserved_ids. The last entry in each line is expected to be a value
// corresponding to the count or relative probability. Exactly one of vocab_file
// and unigrams needs to be passed to this op.
// If not specified, defaults to ""
func FixedUnigramCandidateSamplerVocabFile(value string) FixedUnigramCandidateSamplerAttr {
	return func(m optionalAttr) {
		m["vocab_file"] = value
	}
}

// FixedUnigramCandidateSamplerDistortion sets the optional distortion attribute to value.
//
// value: The distortion is used to skew the unigram probability distribution.
// Each weight is first raised to the distortion's power before adding to the
// internal unigram distribution. As a result, distortion = 1.0 gives regular
// unigram sampling (as defined by the vocab file), and distortion = 0.0 gives
// a uniform distribution.
// If not specified, defaults to 1
func FixedUnigramCandidateSamplerDistortion(value float32) FixedUnigramCandidateSamplerAttr {
	return func(m optionalAttr) {
		m["distortion"] = value
	}
}

// FixedUnigramCandidateSamplerNumReservedIds sets the optional num_reserved_ids attribute to value.
//
// value: Optionally some reserved IDs can be added in the range [0,
// ..., num_reserved_ids) by the users. One use case is that a special unknown
// word token is used as ID 0. These IDs will have a sampling probability of 0.
// If not specified, defaults to 0
func FixedUnigramCandidateSamplerNumReservedIds(value int64) FixedUnigramCandidateSamplerAttr {
	return func(m optionalAttr) {
		m["num_reserved_ids"] = value
	}
}

// FixedUnigramCandidateSamplerNumShards sets the optional num_shards attribute to value.
//
// value: A sampler can be used to sample from a subset of the original range
// in order to speed up the whole computation through parallelism. This parameter
// (together with 'shard') indicates the number of partitions that are being
// used in the overall computation.
// If not specified, defaults to 1
//
// REQUIRES: value >= 1
func FixedUnigramCandidateSamplerNumShards(value int64) FixedUnigramCandidateSamplerAttr {
	return func(m optionalAttr) {
		m["num_shards"] = value
	}
}

// FixedUnigramCandidateSamplerShard sets the optional shard attribute to value.
//
// value: A sampler can be used to sample from a subset of the original range
// in order to speed up the whole computation through parallelism. This parameter
// (together with 'num_shards') indicates the particular partition number of a
// sampler op, when partitioning is being used.
// If not specified, defaults to 0
//
// REQUIRES: value >= 0
func FixedUnigramCandidateSamplerShard(value int64) FixedUnigramCandidateSamplerAttr {
	return func(m optionalAttr) {
		m["shard"] = value
	}
}

// FixedUnigramCandidateSamplerUnigrams sets the optional unigrams attribute to value.
//
// value: A list of unigram counts or probabilities, one per ID in sequential
// order. Exactly one of vocab_file and unigrams should be passed to this op.
// If not specified, defaults to <>
func FixedUnigramCandidateSamplerUnigrams(value []float32) FixedUnigramCandidateSamplerAttr {
	return func(m optionalAttr) {
		m["unigrams"] = value
	}
}

// FixedUnigramCandidateSamplerSeed sets the optional seed attribute to value.
//
// value: If either seed or seed2 are set to be non-zero, the random number
// generator is seeded by the given seed.  Otherwise, it is seeded by a
// random seed.
// If not specified, defaults to 0
func FixedUnigramCandidateSamplerSeed(value int64) FixedUnigramCandidateSamplerAttr {
	return func(m optionalAttr) {
		m["seed"] = value
	}
}

// FixedUnigramCandidateSamplerSeed2 sets the optional seed2 attribute to value.
//
// value: A second seed to avoid seed collision.
// If not specified, defaults to 0
func FixedUnigramCandidateSamplerSeed2(value int64) FixedUnigramCandidateSamplerAttr {
	return func(m optionalAttr) {
		m["seed2"] = value
	}
}

// Generates labels for candidate sampling with a learned unigram distribution.
//
// A unigram sampler could use a fixed unigram distribution read from a
// file or passed in as an in-memory array instead of building up the distribution
// from data on the fly. There is also an option to skew the distribution by
// applying a distortion power to the weights.
//
// The vocabulary file should be in CSV-like format, with the last field
// being the weight associated with the word.
//
// For each batch, this op picks a single set of sampled candidate labels.
//
// The advantages of sampling candidates per-batch are simplicity and the
// possibility of efficient dense matrix multiplication. The disadvantage is that
// the sampled candidates must be chosen independently of the context and of the
// true labels.
//
// Arguments:
//	true_classes: A batch_size * num_true matrix, in which each row contains the
// IDs of the num_true target_classes in the corresponding original label.
//	num_true: Number of true labels per context.
//	num_sampled: Number of candidates to randomly sample.
//	unique: If unique is true, we sample with rejection, so that all sampled
// candidates in a batch are unique. This requires some approximation to
// estimate the post-rejection sampling probabilities.
//	range_max: The sampler will sample integers from the interval [0, range_max).
//
// Returns:
//	sampled_candidates: A vector of length num_sampled, in which each element is
// the ID of a sampled candidate.
//	true_expected_count: A batch_size * num_true matrix, representing
// the number of times each candidate is expected to occur in a batch
// of sampled candidates. If unique=true, then this is a probability.
//	sampled_expected_count: A vector of length num_sampled, for each sampled
// candidate representing the number of times the candidate is expected
// to occur in a batch of sampled candidates.  If unique=true, then this is a
// probability.
func FixedUnigramCandidateSampler(scope *Scope, true_classes tf.Output, num_true int64, num_sampled int64, unique bool, range_max int64, optional ...FixedUnigramCandidateSamplerAttr) (sampled_candidates tf.Output, true_expected_count tf.Output, sampled_expected_count tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_true": num_true, "num_sampled": num_sampled, "unique": unique, "range_max": range_max}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "FixedUnigramCandidateSampler",
		Input: []tf.Input{
			true_classes,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}
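
// Illustrative sketch, not generated code: sampling from an in-memory unigram
// distribution, flattened toward uniform with a 0.5 distortion power. The
// counts below are arbitrary example weights, one per ID.
func exampleFixedUnigramSampler(scope *Scope, trueClasses tf.Output) (tf.Output, tf.Output, tf.Output) {
	counts := []float32{10, 5, 1, 1} // range_max must equal len(counts)
	return FixedUnigramCandidateSampler(scope, trueClasses, 1, 2, true, 4,
		FixedUnigramCandidateSamplerUnigrams(counts),
		FixedUnigramCandidateSamplerDistortion(0.5),
	)
}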

// LogUniformCandidateSamplerAttr is an optional argument to LogUniformCandidateSampler.
type LogUniformCandidateSamplerAttr func(optionalAttr)

// LogUniformCandidateSamplerSeed sets the optional seed attribute to value.
//
// value: If either seed or seed2 are set to be non-zero, the random number
// generator is seeded by the given seed.  Otherwise, it is seeded by a
// random seed.
// If not specified, defaults to 0
func LogUniformCandidateSamplerSeed(value int64) LogUniformCandidateSamplerAttr {
	return func(m optionalAttr) {
		m["seed"] = value
	}
}

// LogUniformCandidateSamplerSeed2 sets the optional seed2 attribute to value.
//
// value: A second seed to avoid seed collision.
// If not specified, defaults to 0
func LogUniformCandidateSamplerSeed2(value int64) LogUniformCandidateSamplerAttr {
	return func(m optionalAttr) {
		m["seed2"] = value
	}
}

// Generates labels for candidate sampling with a log-uniform distribution.
//
// See explanations of candidate sampling and the data formats at
// go/candidate-sampling.
//
// For each batch, this op picks a single set of sampled candidate labels.
//
// The advantages of sampling candidates per-batch are simplicity and the
// possibility of efficient dense matrix multiplication. The disadvantage is that
// the sampled candidates must be chosen independently of the context and of the
// true labels.
//
// Arguments:
//	true_classes: A batch_size * num_true matrix, in which each row contains the
// IDs of the num_true target_classes in the corresponding original label.
//	num_true: Number of true labels per context.
//	num_sampled: Number of candidates to randomly sample.
//	unique: If unique is true, we sample with rejection, so that all sampled
// candidates in a batch are unique. This requires some approximation to
// estimate the post-rejection sampling probabilities.
//	range_max: The sampler will sample integers from the interval [0, range_max).
//
// Returns:
//	sampled_candidates: A vector of length num_sampled, in which each element is
// the ID of a sampled candidate.
//	true_expected_count: A batch_size * num_true matrix, representing
// the number of times each candidate is expected to occur in a batch
// of sampled candidates. If unique=true, then this is a probability.
//	sampled_expected_count: A vector of length num_sampled, for each sampled
// candidate representing the number of times the candidate is expected
// to occur in a batch of sampled candidates.  If unique=true, then this is a
// probability.
func LogUniformCandidateSampler(scope *Scope, true_classes tf.Output, num_true int64, num_sampled int64, unique bool, range_max int64, optional ...LogUniformCandidateSamplerAttr) (sampled_candidates tf.Output, true_expected_count tf.Output, sampled_expected_count tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_true": num_true, "num_sampled": num_sampled, "unique": unique, "range_max": range_max}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "LogUniformCandidateSampler",
		Input: []tf.Input{
			true_classes,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}

// UniformCandidateSamplerAttr is an optional argument to UniformCandidateSampler.
type UniformCandidateSamplerAttr func(optionalAttr)

// UniformCandidateSamplerSeed sets the optional seed attribute to value.
//
// value: If either seed or seed2 are set to be non-zero, the random number
// generator is seeded by the given seed.  Otherwise, it is seeded by a
// random seed.
// If not specified, defaults to 0
func UniformCandidateSamplerSeed(value int64) UniformCandidateSamplerAttr {
	return func(m optionalAttr) {
		m["seed"] = value
	}
}

// UniformCandidateSamplerSeed2 sets the optional seed2 attribute to value.
//
// value: A second seed to avoid seed collision.
// If not specified, defaults to 0
func UniformCandidateSamplerSeed2(value int64) UniformCandidateSamplerAttr {
	return func(m optionalAttr) {
		m["seed2"] = value
	}
}

// Generates labels for candidate sampling with a uniform distribution.
//
// See explanations of candidate sampling and the data formats at
// go/candidate-sampling.
//
// For each batch, this op picks a single set of sampled candidate labels.
//
// The advantages of sampling candidates per-batch are simplicity and the
// possibility of efficient dense matrix multiplication. The disadvantage is that
// the sampled candidates must be chosen independently of the context and of the
// true labels.
//
// Arguments:
//	true_classes: A batch_size * num_true matrix, in which each row contains the
// IDs of the num_true target_classes in the corresponding original label.
//	num_true: Number of true labels per context.
//	num_sampled: Number of candidates to randomly sample.
//	unique: If unique is true, we sample with rejection, so that all sampled
// candidates in a batch are unique. This requires some approximation to
// estimate the post-rejection sampling probabilities.
//	range_max: The sampler will sample integers from the interval [0, range_max).
//
// Returns:
//	sampled_candidates: A vector of length num_sampled, in which each element is
// the ID of a sampled candidate.
//	true_expected_count: A batch_size * num_true matrix, representing
// the number of times each candidate is expected to occur in a batch
// of sampled candidates. If unique=true, then this is a probability.
//	sampled_expected_count: A vector of length num_sampled, for each sampled
// candidate representing the number of times the candidate is expected
// to occur in a batch of sampled candidates.  If unique=true, then this is a
// probability.
func UniformCandidateSampler(scope *Scope, true_classes tf.Output, num_true int64, num_sampled int64, unique bool, range_max int64, optional ...UniformCandidateSamplerAttr) (sampled_candidates tf.Output, true_expected_count tf.Output, sampled_expected_count tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_true": num_true, "num_sampled": num_sampled, "unique": unique, "range_max": range_max}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "UniformCandidateSampler",
		Input: []tf.Input{
			true_classes,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}

// Broadcasts a tensor value to one or more other devices.
func CollectiveBcastSend(scope *Scope, input tf.Output, group_size int64, group_key int64, instance_key int64, shape tf.Shape) (data tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"group_size": group_size, "group_key": group_key, "instance_key": instance_key, "shape": shape}
	opspec := tf.OpSpec{
		Type: "CollectiveBcastSend",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Mutually accumulates multiple tensors of identical type and shape.
func CollectiveGather(scope *Scope, input tf.Output, group_size int64, group_key int64, instance_key int64, shape tf.Shape) (data tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"group_size": group_size, "group_key": group_key, "instance_key": instance_key, "shape": shape}
	opspec := tf.OpSpec{
		Type: "CollectiveGather",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// CollectiveReduceAttr is an optional argument to CollectiveReduce.
type CollectiveReduceAttr func(optionalAttr)

// CollectiveReduceWaitFor sets the optional wait_for attribute to value.
// If not specified, defaults to <>
func CollectiveReduceWaitFor(value []int64) CollectiveReduceAttr {
	return func(m optionalAttr) {
		m["wait_for"] = value
	}
}

// Mutually reduces multiple tensors of identical type and shape.
func CollectiveReduce(scope *Scope, input tf.Output, group_size int64, group_key int64, instance_key int64, merge_op string, final_op string, subdiv_offsets []int64, optional ...CollectiveReduceAttr) (data tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"group_size": group_size, "group_key": group_key, "instance_key": instance_key, "merge_op": merge_op, "final_op": final_op, "subdiv_offsets": subdiv_offsets}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "CollectiveReduce",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
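
// Illustrative sketch, not generated code: an all-reduce style sum across a
// group of two participants. "Add" and "Id" are assumed here as the merge and
// final op names for a plain sum; the group and instance keys are arbitrary
// example values that must be consistent across all participants.
func exampleCollectiveSum(scope *Scope, input tf.Output) tf.Output {
	return CollectiveReduce(scope, input,
		2,   // group_size
		1,   // group_key
		100, // instance_key, unique per collective instance
		"Add", "Id", []int64{0})
}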

// AbortAttr is an optional argument to Abort.
type AbortAttr func(optionalAttr)

// AbortErrorMsg sets the optional error_msg attribute to value.
//
// value: A string which is the message associated with the exception.
// If not specified, defaults to ""
func AbortErrorMsg(value string) AbortAttr {
	return func(m optionalAttr) {
		m["error_msg"] = value
	}
}

// AbortExitWithoutError sets the optional exit_without_error attribute to value.
// If not specified, defaults to false
func AbortExitWithoutError(value bool) AbortAttr {
	return func(m optionalAttr) {
		m["exit_without_error"] = value
	}
}

// Raises an exception to abort the process when called.
//
// If exit_without_error is true, the process will exit normally,
// otherwise it will exit with a SIGABRT signal.
//
// Returns nothing but an exception.
//
// Returns the created operation.
func Abort(scope *Scope, optional ...AbortAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "Abort",

		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}

// Forwards the input to the output.
//
// This operator represents the loop termination condition used by the
// "pivot" switches of a loop.
//
// Arguments:
//	input: A boolean scalar, representing the branch predicate of the Switch op.
//
// Returns The same tensor as `input`.
func LoopCond(scope *Scope, input tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "LoopCond",
		Input: []tf.Input{
			input,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Returns a tensor of zeros with the same shape and type as x.
//
// Arguments:
//	x: a tensor of type T.
//
// Returns a tensor of the same shape and type as x but filled with zeros.
func ZerosLike(scope *Scope, x tf.Output) (y tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "ZerosLike",
		Input: []tf.Input{
			x,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Returns a copy of the input tensor.
func Snapshot(scope *Scope, input tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Snapshot",
		Input: []tf.Input{
			input,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Forwards `data` to the output port determined by `pred`.
//
// If `pred` is true, the `data` input is forwarded to `output_true`. Otherwise,
// the data goes to `output_false`.
//
// See also `RefSwitch` and `Merge`.
//
// Arguments:
//	data: The tensor to be forwarded to the appropriate output.
//	pred: A scalar that specifies which output port will receive data.
//
// Returns:
//	output_false: If `pred` is false, data will be forwarded to this output.
//	output_true: If `pred` is true, data will be forwarded to this output.
func Switch(scope *Scope, data tf.Output, pred tf.Output) (output_false tf.Output, output_true tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Switch",
		Input: []tf.Input{
			data, pred,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1)
}
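
// Illustrative sketch, not generated code: gating a tensor on a boolean
// predicate with Switch. Only the port matching pred produces data at run
// time, so ops attached to the other port are skipped.
func exampleSwitch(scope *Scope, data, pred tf.Output) (tf.Output, tf.Output) {
	outputFalse, outputTrue := Switch(scope, data, pred)
	// Downstream ops built on outputTrue execute only when pred is true.
	return outputFalse, outputTrue
}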

// AudioSpectrogramAttr is an optional argument to AudioSpectrogram.
type AudioSpectrogramAttr func(optionalAttr)

// AudioSpectrogramMagnitudeSquared sets the optional magnitude_squared attribute to value.
//
// value: Whether to return the squared magnitude or just the
// magnitude. Using squared magnitude can avoid extra calculations.
// If not specified, defaults to false
func AudioSpectrogramMagnitudeSquared(value bool) AudioSpectrogramAttr {
	return func(m optionalAttr) {
		m["magnitude_squared"] = value
	}
}

// Produces a visualization of audio data over time.
//
// Spectrograms are a standard way of representing audio information as a series of
// slices of frequency information, one slice for each window of time. By joining
// these together into a sequence, they form a distinctive fingerprint of the sound
// over time.
//
// This op expects to receive audio data as an input, stored as floats in the range
// -1 to 1, together with a window width in samples, and a stride specifying how
// far to move the window between slices. From this it generates a three
// dimensional output. The lowest dimension has an amplitude value for each
// frequency during that time slice. The next dimension is time, with successive
// frequency slices. The final dimension is for the channels in the input, so a
// stereo audio input would have two here for example.
//
// This means the layout when converted and saved as an image is rotated 90 degrees
// clockwise from a typical spectrogram. Time is descending down the Y axis, and
// the frequency decreases from left to right.
//
// Each value in the result represents the square root of the sum of the real and
// imaginary parts of an FFT on the current window of samples. In this way, the
// lowest dimension represents the power of each frequency in the current window,
// and adjacent windows are concatenated in the next dimension.
//
// To get a more intuitive and visual look at what this operation does, you can run
// tensorflow/examples/wav_to_spectrogram to read in an audio file and save out the
// resulting spectrogram as a PNG image.
//
// Arguments:
//	input: Float representation of audio data.
//	window_size: How wide the input window is in samples. For the highest efficiency
// this should be a power of two, but other values are accepted.
//	stride: How widely apart the center of adjacent sample windows should be.
//
// Returns 3D representation of the audio frequencies as an image.
func AudioSpectrogram(scope *Scope, input tf.Output, window_size int64, stride int64, optional ...AudioSpectrogramAttr) (spectrogram tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"window_size": window_size, "stride": stride}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "AudioSpectrogram",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
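
// Illustrative sketch, not generated code: chaining DecodeWav into
// AudioSpectrogram, in the spirit of the wav_to_spectrogram example mentioned
// above. The window and stride values are arbitrary choices for the sketch.
func exampleWavToSpectrogram(scope *Scope, wavBytes tf.Output) tf.Output {
	audio, _ := DecodeWav(scope, wavBytes, DecodeWavDesiredChannels(1))
	return AudioSpectrogram(scope, audio, 256, 128,
		AudioSpectrogramMagnitudeSquared(true),
	)
}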

// CTCBeamSearchDecoderAttr is an optional argument to CTCBeamSearchDecoder.
type CTCBeamSearchDecoderAttr func(optionalAttr)

// CTCBeamSearchDecoderMergeRepeated sets the optional merge_repeated attribute to value.
//
// value: If true, merge repeated classes in output.
// If not specified, defaults to true
func CTCBeamSearchDecoderMergeRepeated(value bool) CTCBeamSearchDecoderAttr {
	return func(m optionalAttr) {
		m["merge_repeated"] = value
	}
}

// Performs beam search decoding on the logits given in input.
//
// A note about the attribute merge_repeated: For the beam search decoder,
// this means that if consecutive entries in a beam are the same, only
// the first of these is emitted.  That is, when the top path is "A B B B B",
// "A B" is returned if merge_repeated = True but "A B B B B" is
// returned if merge_repeated = False.
//
// Arguments:
//	inputs: 3-D, shape: `(max_time x batch_size x num_classes)`, the logits.
//	sequence_length: A vector containing sequence lengths, size `(batch)`.
//	beam_width: A scalar >= 0 (beam search beam width).
//	top_paths: A scalar >= 0, <= beam_width (controls output size).
//
// Returns:
//	decoded_indices: A list (length: top_paths) of indices matrices.  Matrix j,
// size `(total_decoded_outputs[j] x 2)`, has indices of a
// `SparseTensor<int64, 2>`.  The rows store: [batch, time].
//	decoded_values: A list (length: top_paths) of values vectors.  Vector j,
// size `(length total_decoded_outputs[j])`, has the values of a
// `SparseTensor<int64, 2>`.  The vector stores the decoded classes for beam j.
//	decoded_shape: A list (length: top_paths) of shape vectors.  Vector j,
// size `(2)`, stores the shape of the decoded `SparseTensor[j]`.
// Its values are: `[batch_size, max_decoded_length[j]]`.
//	log_probability: A matrix, shaped: `(batch_size x top_paths)`.  The
// sequence log-probabilities.
func CTCBeamSearchDecoder(scope *Scope, inputs tf.Output, sequence_length tf.Output, beam_width int64, top_paths int64, optional ...CTCBeamSearchDecoderAttr) (decoded_indices []tf.Output, decoded_values []tf.Output, decoded_shape []tf.Output, log_probability tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"beam_width": beam_width, "top_paths": top_paths}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "CTCBeamSearchDecoder",
		Input: []tf.Input{
			inputs, sequence_length,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	if scope.Err() != nil {
		return
	}
	var idx int
	var err error
	if decoded_indices, idx, err = makeOutputList(op, idx, "decoded_indices"); err != nil {
		scope.UpdateErr("CTCBeamSearchDecoder", err)
		return
	}
	if decoded_values, idx, err = makeOutputList(op, idx, "decoded_values"); err != nil {
		scope.UpdateErr("CTCBeamSearchDecoder", err)
		return
	}
	if decoded_shape, idx, err = makeOutputList(op, idx, "decoded_shape"); err != nil {
		scope.UpdateErr("CTCBeamSearchDecoder", err)
		return
	}
	log_probability = op.Output(idx)
	return decoded_indices, decoded_values, decoded_shape, log_probability
}
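
// Illustrative sketch, not generated code: beam search over logits, keeping
// the two best paths per batch element and their log-probabilities.
func exampleBeamSearch(scope *Scope, logits, seqLen tf.Output) ([]tf.Output, tf.Output) {
	indices, values, shapes, logProb := CTCBeamSearchDecoder(scope, logits, seqLen, 8, 2,
		CTCBeamSearchDecoderMergeRepeated(true),
	)
	// values and shapes complete the per-path SparseTensors; unused here.
	_, _ = values, shapes
	return indices, logProb
}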

// CTCGreedyDecoderAttr is an optional argument to CTCGreedyDecoder.
type CTCGreedyDecoderAttr func(optionalAttr)

// CTCGreedyDecoderMergeRepeated sets the optional merge_repeated attribute to value.
//
// value: If True, merge repeated classes in output.
// If not specified, defaults to false
func CTCGreedyDecoderMergeRepeated(value bool) CTCGreedyDecoderAttr {
	return func(m optionalAttr) {
		m["merge_repeated"] = value
	}
}

// Performs greedy decoding on the logits given in inputs.
//
// A note about the attribute merge_repeated: if enabled, when
// consecutive logits' maximum indices are the same, only the first of
// these is emitted.  Labeling the blank '*', the sequence "A B B * B B"
// becomes "A B B" if merge_repeated = True and "A B B B B" if
// merge_repeated = False.
//
// Regardless of the value of merge_repeated, if the maximum index of a given
// time and batch corresponds to the blank, index `(num_classes - 1)`, no new
// element is emitted.
//
// Arguments:
//	inputs: 3-D, shape: `(max_time x batch_size x num_classes)`, the logits.
//	sequence_length: A vector containing sequence lengths, size `(batch_size)`.
//
// Returns:
//	decoded_indices: Indices matrix, size `(total_decoded_outputs x 2)`,
// of a `SparseTensor<int64, 2>`.  The rows store: [batch, time].
//	decoded_values: Values vector, size: `(total_decoded_outputs)`,
// of a `SparseTensor<int64, 2>`.  The vector stores the decoded classes.
//	decoded_shape: Shape vector, size `(2)`, of the decoded SparseTensor.
// Values are: `[batch_size, max_decoded_length]`.
//	log_probability: Matrix, size `(batch_size x 1)`, containing sequence
// log-probabilities.
func CTCGreedyDecoder(scope *Scope, inputs tf.Output, sequence_length tf.Output, optional ...CTCGreedyDecoderAttr) (decoded_indices tf.Output, decoded_values tf.Output, decoded_shape tf.Output, log_probability tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "CTCGreedyDecoder",
		Input: []tf.Input{
			inputs, sequence_length,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2), op.Output(3)
}

// CTCLossAttr is an optional argument to CTCLoss.
type CTCLossAttr func(optionalAttr)

// CTCLossPreprocessCollapseRepeated sets the optional preprocess_collapse_repeated attribute to value.
//
// value: Scalar, if true then repeated labels are
// collapsed prior to the CTC calculation.
// If not specified, defaults to false
func CTCLossPreprocessCollapseRepeated(value bool) CTCLossAttr {
	return func(m optionalAttr) {
		m["preprocess_collapse_repeated"] = value
	}
}

// CTCLossCtcMergeRepeated sets the optional ctc_merge_repeated attribute to value.
//
// value: Scalar.  If set to false, *during* CTC calculation
// repeated non-blank labels will not be merged and are interpreted as
// individual labels.  This is a simplified version of CTC.
// If not specified, defaults to true
func CTCLossCtcMergeRepeated(value bool) CTCLossAttr {
	return func(m optionalAttr) {
		m["ctc_merge_repeated"] = value
	}
}

// CTCLossIgnoreLongerOutputsThanInputs sets the optional ignore_longer_outputs_than_inputs attribute to value.
//
// value: Scalar. If set to true, during CTC
// calculation, items that have longer output sequences than input sequences
// are skipped: they don't contribute to the loss term and have zero-gradient.
// If not specified, defaults to false
func CTCLossIgnoreLongerOutputsThanInputs(value bool) CTCLossAttr {
	return func(m optionalAttr) {
		m["ignore_longer_outputs_than_inputs"] = value
	}
}

// Calculates the CTC Loss (log probability) for each batch entry.  Also calculates
//
// the gradient.  This op performs the softmax operation for you, so inputs
// should be e.g. linear projections of outputs by an LSTM.
//
// Arguments:
//	inputs: 3-D, shape: `(max_time x batch_size x num_classes)`, the logits.
//	labels_indices: The indices of a `SparseTensor<int32, 2>`.
// `labels_indices(i, :) == [b, t]` means `labels_values(i)` stores the id for
// `(batch b, time t)`.
//	labels_values: The values (labels) associated with the given batch and time.
//	sequence_length: A vector containing sequence lengths (batch).
//
// Returns:
//	loss: A vector (batch) containing log-probabilities.
//	gradient: The gradient of `loss`.  3-D, shape:
// `(max_time x batch_size x num_classes)`.
func CTCLoss(scope *Scope, inputs tf.Output, labels_indices tf.Output, labels_values tf.Output, sequence_length tf.Output, optional ...CTCLossAttr) (loss tf.Output, gradient tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "CTCLoss",
		Input: []tf.Input{
			inputs, labels_indices, labels_values, sequence_length,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1)
}

// ShapeNAttr is an optional argument to ShapeN.
type ShapeNAttr func(optionalAttr)

// ShapeNOutType sets the optional out_type attribute to value.
// If not specified, defaults to DT_INT32
func ShapeNOutType(value tf.DataType) ShapeNAttr {
	return func(m optionalAttr) {
		m["out_type"] = value
	}
}

// Returns shape of tensors.
//
// This operation returns N 1-D integer tensors representing the shape of each `input[i]`.
func ShapeN(scope *Scope, input []tf.Output, optional ...ShapeNAttr) (output []tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ShapeN",
		Input: []tf.Input{
			tf.OutputList(input),
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	if scope.Err() != nil {
		return
	}
	var idx int
	var err error
	if output, idx, err = makeOutputList(op, idx, "output"); err != nil {
		scope.UpdateErr("ShapeN", err)
		return
	}
	return output
}
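
// A minimal ShapeN sketch (editorial, not generated code), showing the
// list-valued output and the optional out_type attribute:
//
//	s := NewScope()
//	a := Placeholder(s, tf.Float)
//	b := Placeholder(s, tf.Float)
//	shapes := ShapeN(s, []tf.Output{a, b}, ShapeNOutType(tf.Int64))
//	// shapes[0] and shapes[1] are 1-D tf.Int64 tensors holding the shapes
//	// of a and b respectively.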

// CudnnRNNParamsToCanonicalAttr is an optional argument to CudnnRNNParamsToCanonical.
type CudnnRNNParamsToCanonicalAttr func(optionalAttr)

// CudnnRNNParamsToCanonicalRnnMode sets the optional rnn_mode attribute to value.
// If not specified, defaults to "lstm"
func CudnnRNNParamsToCanonicalRnnMode(value string) CudnnRNNParamsToCanonicalAttr {
	return func(m optionalAttr) {
		m["rnn_mode"] = value
	}
}

// CudnnRNNParamsToCanonicalInputMode sets the optional input_mode attribute to value.
// If not specified, defaults to "linear_input"
func CudnnRNNParamsToCanonicalInputMode(value string) CudnnRNNParamsToCanonicalAttr {
	return func(m optionalAttr) {
		m["input_mode"] = value
	}
}

// CudnnRNNParamsToCanonicalDirection sets the optional direction attribute to value.
// If not specified, defaults to "unidirectional"
func CudnnRNNParamsToCanonicalDirection(value string) CudnnRNNParamsToCanonicalAttr {
	return func(m optionalAttr) {
		m["direction"] = value
	}
}

// CudnnRNNParamsToCanonicalDropout sets the optional dropout attribute to value.
// If not specified, defaults to 0
func CudnnRNNParamsToCanonicalDropout(value float32) CudnnRNNParamsToCanonicalAttr {
	return func(m optionalAttr) {
		m["dropout"] = value
	}
}

// CudnnRNNParamsToCanonicalSeed sets the optional seed attribute to value.
// If not specified, defaults to 0
func CudnnRNNParamsToCanonicalSeed(value int64) CudnnRNNParamsToCanonicalAttr {
	return func(m optionalAttr) {
		m["seed"] = value
	}
}

// CudnnRNNParamsToCanonicalSeed2 sets the optional seed2 attribute to value.
// If not specified, defaults to 0
func CudnnRNNParamsToCanonicalSeed2(value int64) CudnnRNNParamsToCanonicalAttr {
	return func(m optionalAttr) {
		m["seed2"] = value
	}
}

// Retrieves CudnnRNN params in canonical form.
//
// Retrieves a set of weights from the opaque params buffer that can be saved and
// restored in a way compatible with future runs.
//
// Note that the params buffer may not be compatible across different GPUs. So any
// save and restoration should be converted to and from the canonical weights and
// biases.
//
// num_layers: Specifies the number of layers in the RNN model.
// num_units: Specifies the size of the hidden state.
// input_size: Specifies the size of the input state.
// num_params: number of parameter sets for all layers.
//     Each layer may contain multiple parameter sets, with each set consisting of
//     a weight matrix and a bias vector.
// weights: the canonical form of weights that can be used for saving
//     and restoration. They are more likely to be compatible across different
//     generations.
// biases: the canonical form of biases that can be used for saving
//     and restoration. They are more likely to be compatible across different
//     generations.
// rnn_mode: Indicates the type of the RNN model.
// input_mode: Indicates whether there is a linear projection between the input and
//     the actual computation before the first layer. 'skip_input' is only allowed
//     when input_size == num_units; 'auto_select' implies 'skip_input' when
//     input_size == num_units; otherwise, it implies 'linear_input'.
// direction: Indicates whether a bidirectional model will be used.
//     dir = (direction == bidirectional) ? 2 : 1
// dropout: dropout probability. When set to 0., dropout is disabled.
// seed: the 1st part of a seed to initialize dropout.
// seed2: the 2nd part of a seed to initialize dropout.
func CudnnRNNParamsToCanonical(scope *Scope, num_layers tf.Output, num_units tf.Output, input_size tf.Output, params tf.Output, num_params int64, optional ...CudnnRNNParamsToCanonicalAttr) (weights []tf.Output, biases []tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_params": num_params}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "CudnnRNNParamsToCanonical",
		Input: []tf.Input{
			num_layers, num_units, input_size, params,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	if scope.Err() != nil {
		return
	}
	var idx int
	var err error
	if weights, idx, err = makeOutputList(op, idx, "weights"); err != nil {
		scope.UpdateErr("CudnnRNNParamsToCanonical", err)
		return
	}
	if biases, idx, err = makeOutputList(op, idx, "biases"); err != nil {
		scope.UpdateErr("CudnnRNNParamsToCanonical", err)
		return
	}
	return weights, biases
}
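
// A call-shape sketch for CudnnRNNParamsToCanonical (editorial, not generated
// code). The params buffer and the exact num_params value depend on the model;
// a single-layer unidirectional LSTM is assumed here, for which cuDNN uses 8
// parameter sets per layer, so num_params of 8 is used for illustration:
//
//	s := NewScope()
//	numLayers := Const(s, int32(1))
//	numUnits := Const(s, int32(128))
//	inputSize := Const(s, int32(64))
//	// params is the opaque buffer created elsewhere (sized via CudnnRNNParamsSize).
//	weights, biases := CudnnRNNParamsToCanonical(s, numLayers, numUnits, inputSize,
//		params, 8, CudnnRNNParamsToCanonicalRnnMode("lstm"))
//	// weights and biases are lists of 8 canonical tensors each.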

// CudnnRNNBackpropV3Attr is an optional argument to CudnnRNNBackpropV3.
type CudnnRNNBackpropV3Attr func(optionalAttr)

// CudnnRNNBackpropV3RnnMode sets the optional rnn_mode attribute to value.
// If not specified, defaults to "lstm"
func CudnnRNNBackpropV3RnnMode(value string) CudnnRNNBackpropV3Attr {
	return func(m optionalAttr) {
		m["rnn_mode"] = value
	}
}

// CudnnRNNBackpropV3InputMode sets the optional input_mode attribute to value.
// If not specified, defaults to "linear_input"
func CudnnRNNBackpropV3InputMode(value string) CudnnRNNBackpropV3Attr {
	return func(m optionalAttr) {
		m["input_mode"] = value
	}
}

// CudnnRNNBackpropV3Direction sets the optional direction attribute to value.
// If not specified, defaults to "unidirectional"
func CudnnRNNBackpropV3Direction(value string) CudnnRNNBackpropV3Attr {
	return func(m optionalAttr) {
		m["direction"] = value
	}
}

// CudnnRNNBackpropV3Dropout sets the optional dropout attribute to value.
// If not specified, defaults to 0
func CudnnRNNBackpropV3Dropout(value float32) CudnnRNNBackpropV3Attr {
	return func(m optionalAttr) {
		m["dropout"] = value
	}
}

// CudnnRNNBackpropV3Seed sets the optional seed attribute to value.
// If not specified, defaults to 0
func CudnnRNNBackpropV3Seed(value int64) CudnnRNNBackpropV3Attr {
	return func(m optionalAttr) {
		m["seed"] = value
	}
}

// CudnnRNNBackpropV3Seed2 sets the optional seed2 attribute to value.
// If not specified, defaults to 0
func CudnnRNNBackpropV3Seed2(value int64) CudnnRNNBackpropV3Attr {
	return func(m optionalAttr) {
		m["seed2"] = value
	}
}

// CudnnRNNBackpropV3TimeMajor sets the optional time_major attribute to value.
// If not specified, defaults to true
func CudnnRNNBackpropV3TimeMajor(value bool) CudnnRNNBackpropV3Attr {
	return func(m optionalAttr) {
		m["time_major"] = value
	}
}

// Backprop step of CudnnRNNV3.
//
// Compute the backprop of both data and weights in an RNN. Takes an extra
//     "sequence_lengths" input compared to CudnnRNNBackprop.
//
// rnn_mode: Indicates the type of the RNN model.
// input_mode: Indicates whether there is a linear projection between the input and
//     the actual computation before the first layer. 'skip_input' is only allowed
//     when input_size == num_units; 'auto_select' implies 'skip_input' when
//     input_size == num_units; otherwise, it implies 'linear_input'.
// direction: Indicates whether a bidirectional model will be used. Should be
//   "unidirectional" or "bidirectional".
// dropout: Dropout probability. When set to 0., dropout is disabled.
// seed: The 1st part of a seed to initialize dropout.
// seed2: The 2nd part of a seed to initialize dropout.
// input: If time_major is true, this is a 3-D tensor with the shape of
//     [seq_length, batch_size, input_size]. If time_major is false, the shape is
//     [batch_size, seq_length, input_size].
// input_h: If time_major is true, this is a 3-D tensor with the shape of
//     [num_layer * dir, batch_size, num_units]. If time_major is false, the shape
//     is [batch_size, num_layer * dir, num_units].
// input_c: For LSTM, a 3-D tensor with the shape of
//     [num_layer * dir, batch, num_units]. For other models, it is ignored.
// params: A 1-D tensor that contains the weights and biases in an opaque layout.
//     The size must be created through CudnnRNNParamsSize, and initialized
//     separately. Note that they might not be compatible across different
//     generations, so it is a good idea to save and restore them in the
//     canonical form (see CudnnRNNParamsToCanonical).
// sequence_lengths: a vector of lengths of each input sequence.
// output: If time_major is true, this is a 3-D tensor with the shape of
//     [seq_length, batch_size, dir * num_units]. If time_major is false, the
//     shape is [batch_size, seq_length, dir * num_units].
// output_h: The same shape as input_h.
// output_c: The same shape as input_c for LSTM. An empty tensor for other models.
// output_backprop: A 3-D tensor with the same shape as output in the forward pass.
// output_h_backprop: A 3-D tensor with the same shape as output_h in the forward
//     pass.
// output_c_backprop: A 3-D tensor with the same shape as output_c in the forward
//     pass.
// time_major: Indicates whether the input/output format is time major or batch
//     major.
// reserve_space: The same reserve_space produced in the forward operation.
// input_backprop: The backprop to input in the forward pass. Has the same shape
//     as input.
// input_h_backprop: The backprop to input_h in the forward pass. Has the same
//     shape as input_h.
// input_c_backprop: The backprop to input_c in the forward pass. Has the same
//     shape as input_c.
// params_backprop: The backprop to the params buffer in the forward pass. Has the
//     same shape as params.
func CudnnRNNBackpropV3(scope *Scope, input tf.Output, input_h tf.Output, input_c tf.Output, params tf.Output, sequence_lengths tf.Output, output tf.Output, output_h tf.Output, output_c tf.Output, output_backprop tf.Output, output_h_backprop tf.Output, output_c_backprop tf.Output, reserve_space tf.Output, host_reserved tf.Output, optional ...CudnnRNNBackpropV3Attr) (input_backprop tf.Output, input_h_backprop tf.Output, input_c_backprop tf.Output, params_backprop tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "CudnnRNNBackpropV3",
		Input: []tf.Input{
			input, input_h, input_c, params, sequence_lengths, output, output_h, output_c, output_backprop, output_h_backprop, output_c_backprop, reserve_space, host_reserved,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2), op.Output(3)
}

// CudnnRNNBackpropV2Attr is an optional argument to CudnnRNNBackpropV2.
type CudnnRNNBackpropV2Attr func(optionalAttr)

// CudnnRNNBackpropV2RnnMode sets the optional rnn_mode attribute to value.
// If not specified, defaults to "lstm"
func CudnnRNNBackpropV2RnnMode(value string) CudnnRNNBackpropV2Attr {
	return func(m optionalAttr) {
		m["rnn_mode"] = value
	}
}

// CudnnRNNBackpropV2InputMode sets the optional input_mode attribute to value.
// If not specified, defaults to "linear_input"
func CudnnRNNBackpropV2InputMode(value string) CudnnRNNBackpropV2Attr {
	return func(m optionalAttr) {
		m["input_mode"] = value
	}
}

// CudnnRNNBackpropV2Direction sets the optional direction attribute to value.
// If not specified, defaults to "unidirectional"
func CudnnRNNBackpropV2Direction(value string) CudnnRNNBackpropV2Attr {
	return func(m optionalAttr) {
		m["direction"] = value
	}
}

// CudnnRNNBackpropV2Dropout sets the optional dropout attribute to value.
// If not specified, defaults to 0
func CudnnRNNBackpropV2Dropout(value float32) CudnnRNNBackpropV2Attr {
	return func(m optionalAttr) {
		m["dropout"] = value
	}
}

// CudnnRNNBackpropV2Seed sets the optional seed attribute to value.
// If not specified, defaults to 0
func CudnnRNNBackpropV2Seed(value int64) CudnnRNNBackpropV2Attr {
	return func(m optionalAttr) {
		m["seed"] = value
	}
}

// CudnnRNNBackpropV2Seed2 sets the optional seed2 attribute to value.
// If not specified, defaults to 0
func CudnnRNNBackpropV2Seed2(value int64) CudnnRNNBackpropV2Attr {
	return func(m optionalAttr) {
		m["seed2"] = value
	}
}

// Backprop step of CudnnRNN.
//
// Compute the backprop of both data and weights in an RNN. Takes an extra
//     "host_reserved" input compared to CudnnRNNBackprop, which is used to
//     determine the RNN's cudnnRNNAlgo_t and cudnnMathType_t.
//
// rnn_mode: Indicates the type of the RNN model.
// input_mode: Indicates whether there is a linear projection between the input and
//     the actual computation before the first layer. 'skip_input' is only allowed
//     when input_size == num_units; 'auto_select' implies 'skip_input' when
//     input_size == num_units; otherwise, it implies 'linear_input'.
// direction: Indicates whether a bidirectional model will be used. Should be
//   "unidirectional" or "bidirectional".
// dropout: Dropout probability. When set to 0., dropout is disabled.
// seed: The 1st part of a seed to initialize dropout.
// seed2: The 2nd part of a seed to initialize dropout.
// input: A 3-D tensor with the shape of [seq_length, batch_size, input_size].
// input_h: A 3-D tensor with the shape of [num_layer * dir, batch_size,
//     num_units].
// input_c: For LSTM, a 3-D tensor with the shape of
//     [num_layer * dir, batch, num_units]. For other models, it is ignored.
// params: A 1-D tensor that contains the weights and biases in an opaque layout.
//     The size must be created through CudnnRNNParamsSize, and initialized
//     separately. Note that they might not be compatible across different
//     generations, so it is a good idea to save and restore them in the
//     canonical form (see CudnnRNNParamsToCanonical).
// output: A 3-D tensor with the shape of [seq_length, batch_size,
//     dir * num_units].
// output_h: The same shape as input_h.
// output_c: The same shape as input_c for LSTM. An empty tensor for other models.
// output_backprop: A 3-D tensor with the same shape as output in the forward pass.
// output_h_backprop: A 3-D tensor with the same shape as output_h in the forward
//     pass.
// output_c_backprop: A 3-D tensor with the same shape as output_c in the forward
//     pass.
// reserve_space: The same reserve_space produced in the forward operation.
// host_reserved: The same host_reserved produced in the forward operation.
// input_backprop: The backprop to input in the forward pass. Has the same shape
//     as input.
// input_h_backprop: The backprop to input_h in the forward pass. Has the same
//     shape as input_h.
// input_c_backprop: The backprop to input_c in the forward pass. Has the same
//     shape as input_c.
// params_backprop: The backprop to the params buffer in the forward pass. Has the
//     same shape as params.
func CudnnRNNBackpropV2(scope *Scope, input tf.Output, input_h tf.Output, input_c tf.Output, params tf.Output, output tf.Output, output_h tf.Output, output_c tf.Output, output_backprop tf.Output, output_h_backprop tf.Output, output_c_backprop tf.Output, reserve_space tf.Output, host_reserved tf.Output, optional ...CudnnRNNBackpropV2Attr) (input_backprop tf.Output, input_h_backprop tf.Output, input_c_backprop tf.Output, params_backprop tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "CudnnRNNBackpropV2",
		Input: []tf.Input{
			input, input_h, input_c, params, output, output_h, output_c, output_backprop, output_h_backprop, output_c_backprop, reserve_space, host_reserved,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2), op.Output(3)
}

// CudnnRNNV2Attr is an optional argument to CudnnRNNV2.
type CudnnRNNV2Attr func(optionalAttr)

// CudnnRNNV2RnnMode sets the optional rnn_mode attribute to value.
// If not specified, defaults to "lstm"
func CudnnRNNV2RnnMode(value string) CudnnRNNV2Attr {
	return func(m optionalAttr) {
		m["rnn_mode"] = value
	}
}

// CudnnRNNV2InputMode sets the optional input_mode attribute to value.
// If not specified, defaults to "linear_input"
func CudnnRNNV2InputMode(value string) CudnnRNNV2Attr {
	return func(m optionalAttr) {
		m["input_mode"] = value
	}
}

// CudnnRNNV2Direction sets the optional direction attribute to value.
// If not specified, defaults to "unidirectional"
func CudnnRNNV2Direction(value string) CudnnRNNV2Attr {
	return func(m optionalAttr) {
		m["direction"] = value
	}
}

// CudnnRNNV2Dropout sets the optional dropout attribute to value.
// If not specified, defaults to 0
func CudnnRNNV2Dropout(value float32) CudnnRNNV2Attr {
	return func(m optionalAttr) {
		m["dropout"] = value
	}
}

// CudnnRNNV2Seed sets the optional seed attribute to value.
// If not specified, defaults to 0
func CudnnRNNV2Seed(value int64) CudnnRNNV2Attr {
	return func(m optionalAttr) {
		m["seed"] = value
	}
}

// CudnnRNNV2Seed2 sets the optional seed2 attribute to value.
// If not specified, defaults to 0
func CudnnRNNV2Seed2(value int64) CudnnRNNV2Attr {
	return func(m optionalAttr) {
		m["seed2"] = value
	}
}

// CudnnRNNV2IsTraining sets the optional is_training attribute to value.
// If not specified, defaults to true
func CudnnRNNV2IsTraining(value bool) CudnnRNNV2Attr {
	return func(m optionalAttr) {
		m["is_training"] = value
	}
}

// An RNN backed by cuDNN.
//
// Computes the RNN from the input and initial states, with respect to the params
// buffer. Produces one extra output, "host_reserved", compared to CudnnRNN.
//
// rnn_mode: Indicates the type of the RNN model.
// input_mode: Indicates whether there is a linear projection between the input and
//   the actual computation before the first layer. 'skip_input' is only allowed
//   when input_size == num_units; 'auto_select' implies 'skip_input' when
//   input_size == num_units; otherwise, it implies 'linear_input'.
// direction: Indicates whether a bidirectional model will be used. Should be
//   "unidirectional" or "bidirectional".
// dropout: Dropout probability. When set to 0., dropout is disabled.
// seed: The 1st part of a seed to initialize dropout.
// seed2: The 2nd part of a seed to initialize dropout.
// input: A 3-D tensor with the shape of [seq_length, batch_size, input_size].
// input_h: A 3-D tensor with the shape of [num_layer * dir, batch_size,
//     num_units].
// input_c: For LSTM, a 3-D tensor with the shape of
//     [num_layer * dir, batch, num_units]. For other models, it is ignored.
// params: A 1-D tensor that contains the weights and biases in an opaque layout.
//     The size must be created through CudnnRNNParamsSize, and initialized
//     separately. Note that they might not be compatible across different
//     generations, so it is a good idea to save and restore them in the
//     canonical form (see CudnnRNNParamsToCanonical).
// output: A 3-D tensor with the shape of [seq_length, batch_size,
//     dir * num_units].
// output_h: The same shape as input_h.
// output_c: The same shape as input_c for LSTM. An empty tensor for other models.
// is_training: Indicates whether this operation is used for inference or
//   training.
// reserve_space: An opaque tensor that can be used in backprop calculation. It
//   is only produced if is_training is true.
// host_reserved: An opaque tensor that can be used in backprop calculation. It is
//   only produced if is_training is true. It is output on host memory rather than
//   device memory.
func CudnnRNNV2(scope *Scope, input tf.Output, input_h tf.Output, input_c tf.Output, params tf.Output, optional ...CudnnRNNV2Attr) (output tf.Output, output_h tf.Output, output_c tf.Output, reserve_space tf.Output, host_reserved tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "CudnnRNNV2",
		Input: []tf.Input{
			input, input_h, input_c, params,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2), op.Output(3), op.Output(4)
}
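
// A minimal forward-pass sketch for CudnnRNNV2 (editorial, not generated code),
// assuming placeholder inputs shaped as documented above; reserve_space and
// host_reserved are the extra tensors that CudnnRNNBackpropV2 consumes:
//
//	s := NewScope()
//	input := Placeholder(s, tf.Float)  // [seq_length, batch_size, input_size]
//	inputH := Placeholder(s, tf.Float) // [num_layer * dir, batch_size, num_units]
//	inputC := Placeholder(s, tf.Float) // LSTM cell state, same shape as inputH
//	params := Placeholder(s, tf.Float) // opaque buffer sized via CudnnRNNParamsSize
//	output, outputH, outputC, reserveSpace, hostReserved := CudnnRNNV2(
//		s, input, inputH, inputC, params, CudnnRNNV2IsTraining(true))
//	// During training, feed reserveSpace and hostReserved (along with the
//	// forward outputs and their gradients) to CudnnRNNBackpropV2.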

// RecordInputAttr is an optional argument to RecordInput.
type RecordInputAttr func(optionalAttr)

// RecordInputFileRandomSeed sets the optional file_random_seed attribute to value.
//
// value: Random seeds used to produce randomized records.
// If not specified, defaults to 301
func RecordInputFileRandomSeed(value int64) RecordInputAttr {
	return func(m optionalAttr) {
		m["file_random_seed"] = value
	}
}

// RecordInputFileShuffleShiftRatio sets the optional file_shuffle_shift_ratio attribute to value.
//
// value: Shifts the list of files after the list is randomly
// shuffled.
// If not specified, defaults to 0
func RecordInputFileShuffleShiftRatio(value float32) RecordInputAttr {
	return func(m optionalAttr) {
		m["file_shuffle_shift_ratio"] = value
	}
}

// RecordInputFileBufferSize sets the optional file_buffer_size attribute to value.
//
// value: The randomization shuffling buffer.
// If not specified, defaults to 10000
func RecordInputFileBufferSize(value int64) RecordInputAttr {
	return func(m optionalAttr) {
		m["file_buffer_size"] = value
	}
}

// RecordInputFileParallelism sets the optional file_parallelism attribute to value.
//
// value: How many sstables are opened and concurrently iterated over.
// If not specified, defaults to 16
func RecordInputFileParallelism(value int64) RecordInputAttr {
	return func(m optionalAttr) {
		m["file_parallelism"] = value
	}
}

// RecordInputBatchSize sets the optional batch_size attribute to value.
//
// value: The batch size.
// If not specified, defaults to 32
func RecordInputBatchSize(value int64) RecordInputAttr {
	return func(m optionalAttr) {
		m["batch_size"] = value
	}
}

// RecordInputCompressionType sets the optional compression_type attribute to value.
//
// value: The type of compression for the file. Currently ZLIB and
// GZIP are supported. Defaults to none.
// If not specified, defaults to ""
func RecordInputCompressionType(value string) RecordInputAttr {
	return func(m optionalAttr) {
		m["compression_type"] = value
	}
}

// Emits randomized records.
//
// Arguments:
//	file_pattern: Glob pattern for the data files.
//
// Returns A tensor of shape [batch_size].
func RecordInput(scope *Scope, file_pattern string, optional ...RecordInputAttr) (records tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"file_pattern": file_pattern}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "RecordInput",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
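
// A minimal RecordInput sketch (editorial, not generated code). The file
// pattern below is a hypothetical path used purely for illustration:
//
//	s := NewScope()
//	records := RecordInput(s, "/data/train-*.records",
//		RecordInputBatchSize(64),
//		RecordInputFileParallelism(4),
//		RecordInputCompressionType("GZIP"))
//	// records is a string tensor of shape [64]; each element is one
//	// serialized record drawn from the shuffled files.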

// OrderedMapClearAttr is an optional argument to OrderedMapClear.
type OrderedMapClearAttr func(optionalAttr)

// OrderedMapClearCapacity sets the optional capacity attribute to value.
// If not specified, defaults to 0
//
// REQUIRES: value >= 0
func OrderedMapClearCapacity(value int64) OrderedMapClearAttr {
	return func(m optionalAttr) {
		m["capacity"] = value
	}
}

// OrderedMapClearMemoryLimit sets the optional memory_limit attribute to value.
// If not specified, defaults to 0
//
// REQUIRES: value >= 0
func OrderedMapClearMemoryLimit(value int64) OrderedMapClearAttr {
	return func(m optionalAttr) {
		m["memory_limit"] = value
	}
}

// OrderedMapClearContainer sets the optional container attribute to value.
// If not specified, defaults to ""
func OrderedMapClearContainer(value string) OrderedMapClearAttr {
	return func(m optionalAttr) {
		m["container"] = value
	}
}

// OrderedMapClearSharedName sets the optional shared_name attribute to value.
// If not specified, defaults to ""
func OrderedMapClearSharedName(value string) OrderedMapClearAttr {
	return func(m optionalAttr) {
		m["shared_name"] = value
	}
}

// Op removes all elements in the underlying container.
//
// Returns the created operation.
func OrderedMapClear(scope *Scope, dtypes []tf.DataType, optional ...OrderedMapClearAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"dtypes": dtypes}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "OrderedMapClear",

		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}

// OrderedMapIncompleteSizeAttr is an optional argument to OrderedMapIncompleteSize.
type OrderedMapIncompleteSizeAttr func(optionalAttr)

// OrderedMapIncompleteSizeCapacity sets the optional capacity attribute to value.
// If not specified, defaults to 0
//
// REQUIRES: value >= 0
func OrderedMapIncompleteSizeCapacity(value int64) OrderedMapIncompleteSizeAttr {
	return func(m optionalAttr) {
		m["capacity"] = value
	}
}

// OrderedMapIncompleteSizeMemoryLimit sets the optional memory_limit attribute to value.
// If not specified, defaults to 0
//
// REQUIRES: value >= 0
func OrderedMapIncompleteSizeMemoryLimit(value int64) OrderedMapIncompleteSizeAttr {
	return func(m optionalAttr) {
		m["memory_limit"] = value
	}
}

// OrderedMapIncompleteSizeContainer sets the optional container attribute to value.
// If not specified, defaults to ""
func OrderedMapIncompleteSizeContainer(value string) OrderedMapIncompleteSizeAttr {
	return func(m optionalAttr) {
		m["container"] = value
	}
}

// OrderedMapIncompleteSizeSharedName sets the optional shared_name attribute to value.
// If not specified, defaults to ""
func OrderedMapIncompleteSizeSharedName(value string) OrderedMapIncompleteSizeAttr {
	return func(m optionalAttr) {
		m["shared_name"] = value
	}
}

// Op returns the number of incomplete elements in the underlying container.
func OrderedMapIncompleteSize(scope *Scope, dtypes []tf.DataType, optional ...OrderedMapIncompleteSizeAttr) (size tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"dtypes": dtypes}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "OrderedMapIncompleteSize",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// BoostedTreesQuantileStreamResourceHandleOpAttr is an optional argument to BoostedTreesQuantileStreamResourceHandleOp.
type BoostedTreesQuantileStreamResourceHandleOpAttr func(optionalAttr)

// BoostedTreesQuantileStreamResourceHandleOpContainer sets the optional container attribute to value.
// If not specified, defaults to ""
func BoostedTreesQuantileStreamResourceHandleOpContainer(value string) BoostedTreesQuantileStreamResourceHandleOpAttr {
	return func(m optionalAttr) {
		m["container"] = value
	}
}

// BoostedTreesQuantileStreamResourceHandleOpSharedName sets the optional shared_name attribute to value.
// If not specified, defaults to ""
func BoostedTreesQuantileStreamResourceHandleOpSharedName(value string) BoostedTreesQuantileStreamResourceHandleOpAttr {
	return func(m optionalAttr) {
		m["shared_name"] = value
	}
}

// Creates a handle to a BoostedTreesQuantileStreamResource.
func BoostedTreesQuantileStreamResourceHandleOp(scope *Scope, optional ...BoostedTreesQuantileStreamResourceHandleOpAttr) (resource tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "BoostedTreesQuantileStreamResourceHandleOp",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// OrderedMapSizeAttr is an optional argument to OrderedMapSize.
type OrderedMapSizeAttr func(optionalAttr)

// OrderedMapSizeCapacity sets the optional capacity attribute to value.
// If not specified, defaults to 0
//
// REQUIRES: value >= 0
func OrderedMapSizeCapacity(value int64) OrderedMapSizeAttr {
	return func(m optionalAttr) {
		m["capacity"] = value
	}
}

// OrderedMapSizeMemoryLimit sets the optional memory_limit attribute to value.
// If not specified, defaults to 0
//
// REQUIRES: value >= 0
func OrderedMapSizeMemoryLimit(value int64) OrderedMapSizeAttr {
	return func(m optionalAttr) {
		m["memory_limit"] = value
	}
}

// OrderedMapSizeContainer sets the optional container attribute to value.
// If not specified, defaults to ""
func OrderedMapSizeContainer(value string) OrderedMapSizeAttr {
	return func(m optionalAttr) {
		m["container"] = value
	}
}

// OrderedMapSizeSharedName sets the optional shared_name attribute to value.
// If not specified, defaults to ""
func OrderedMapSizeSharedName(value string) OrderedMapSizeAttr {
	return func(m optionalAttr) {
		m["shared_name"] = value
	}
}

// Op returns the number of elements in the underlying container.
func OrderedMapSize(scope *Scope, dtypes []tf.DataType, optional ...OrderedMapSizeAttr) (size tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"dtypes": dtypes}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "OrderedMapSize",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Generate the bucket boundaries for each feature based on accumulated summaries.
//
// An op that returns a list of float tensors for a quantile stream resource. Each
// tensor is Rank 1 containing bucket boundaries for a single feature.
//
// Arguments:
//	quantile_stream_resource_handle: resource handle referring to a QuantileStreamResource.
//	num_features: inferred int; number of features to get bucket boundaries for.
//
// Returns float; List of Rank 1 Tensors each containing the bucket boundaries for a feature.
func BoostedTreesQuantileStreamResourceGetBucketBoundaries(scope *Scope, quantile_stream_resource_handle tf.Output, num_features int64) (bucket_boundaries []tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_features": num_features}
	opspec := tf.OpSpec{
		Type: "BoostedTreesQuantileStreamResourceGetBucketBoundaries",
		Input: []tf.Input{
			quantile_stream_resource_handle,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	if scope.Err() != nil {
		return
	}
	var idx int
	var err error
	if bucket_boundaries, idx, err = makeOutputList(op, idx, "bucket_boundaries"); err != nil {
		scope.UpdateErr("BoostedTreesQuantileStreamResourceGetBucketBoundaries", err)
		return
	}
	return bucket_boundaries
}

// OrderedMapUnstageAttr is an optional argument to OrderedMapUnstage.
type OrderedMapUnstageAttr func(optionalAttr)

// OrderedMapUnstageCapacity sets the optional capacity attribute to value.
// If not specified, defaults to 0
//
// REQUIRES: value >= 0
func OrderedMapUnstageCapacity(value int64) OrderedMapUnstageAttr {
	return func(m optionalAttr) {
		m["capacity"] = value
	}
}

// OrderedMapUnstageMemoryLimit sets the optional memory_limit attribute to value.
// If not specified, defaults to 0
//
// REQUIRES: value >= 0
func OrderedMapUnstageMemoryLimit(value int64) OrderedMapUnstageAttr {
	return func(m optionalAttr) {
		m["memory_limit"] = value
	}
}

// OrderedMapUnstageContainer sets the optional container attribute to value.
// If not specified, defaults to ""
func OrderedMapUnstageContainer(value string) OrderedMapUnstageAttr {
	return func(m optionalAttr) {
		m["container"] = value
	}
}

// OrderedMapUnstageSharedName sets the optional shared_name attribute to value.
// If not specified, defaults to ""
func OrderedMapUnstageSharedName(value string) OrderedMapUnstageAttr {
	return func(m optionalAttr) {
		m["shared_name"] = value
	}
}

// Op removes and returns the values associated with the key
//
// from the underlying container.   If the underlying container
// does not contain this key, the op will block until it does.
func OrderedMapUnstage(scope *Scope, key tf.Output, indices tf.Output, dtypes []tf.DataType, optional ...OrderedMapUnstageAttr) (values []tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"dtypes": dtypes}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "OrderedMapUnstage",
		Input: []tf.Input{
			key, indices,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	if scope.Err() != nil {
		return
	}
	var idx int
	var err error
	if values, idx, err = makeOutputList(op, idx, "values"); err != nil {
		scope.UpdateErr("OrderedMapUnstage", err)
		return
	}
	return values
}

// OrderedMapPeekAttr is an optional argument to OrderedMapPeek.
type OrderedMapPeekAttr func(optionalAttr)

// OrderedMapPeekCapacity sets the optional capacity attribute to value.
// If not specified, defaults to 0
//
// REQUIRES: value >= 0
func OrderedMapPeekCapacity(value int64) OrderedMapPeekAttr {
	return func(m optionalAttr) {
		m["capacity"] = value
	}
}

// OrderedMapPeekMemoryLimit sets the optional memory_limit attribute to value.
// If not specified, defaults to 0
//
// REQUIRES: value >= 0
func OrderedMapPeekMemoryLimit(value int64) OrderedMapPeekAttr {
	return func(m optionalAttr) {
		m["memory_limit"] = value
	}
}

// OrderedMapPeekContainer sets the optional container attribute to value.
// If not specified, defaults to ""
func OrderedMapPeekContainer(value string) OrderedMapPeekAttr {
	return func(m optionalAttr) {
		m["container"] = value
	}
}

// OrderedMapPeekSharedName sets the optional shared_name attribute to value.
// If not specified, defaults to ""
func OrderedMapPeekSharedName(value string) OrderedMapPeekAttr {
	return func(m optionalAttr) {
		m["shared_name"] = value
	}
}

// Op peeks at the values at the specified key.  If the
//
// underlying container does not contain this key
// this op will block until it does.   This Op is optimized for
// performance.
func OrderedMapPeek(scope *Scope, key tf.Output, indices tf.Output, dtypes []tf.DataType, optional ...OrderedMapPeekAttr) (values []tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"dtypes": dtypes}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "OrderedMapPeek",
		Input: []tf.Input{
			key, indices,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	if scope.Err() != nil {
		return
	}
	var idx int
	var err error
	if values, idx, err = makeOutputList(op, idx, "values"); err != nil {
		scope.UpdateErr("OrderedMapPeek", err)
		return
	}
	return values
}

// MapIncompleteSizeAttr is an optional argument to MapIncompleteSize.
type MapIncompleteSizeAttr func(optionalAttr)

// MapIncompleteSizeCapacity sets the optional capacity attribute to value.
// If not specified, defaults to 0
//
// REQUIRES: value >= 0
func MapIncompleteSizeCapacity(value int64) MapIncompleteSizeAttr {
	return func(m optionalAttr) {
		m["capacity"] = value
	}
}

// MapIncompleteSizeMemoryLimit sets the optional memory_limit attribute to value.
// If not specified, defaults to 0
//
// REQUIRES: value >= 0
func MapIncompleteSizeMemoryLimit(value int64) MapIncompleteSizeAttr {
	return func(m optionalAttr) {
		m["memory_limit"] = value
	}
}

// MapIncompleteSizeContainer sets the optional container attribute to value.
// If not specified, defaults to ""
func MapIncompleteSizeContainer(value string) MapIncompleteSizeAttr {
	return func(m optionalAttr) {
		m["container"] = value
	}
}

// MapIncompleteSizeSharedName sets the optional shared_name attribute to value.
// If not specified, defaults to ""
func MapIncompleteSizeSharedName(value string) MapIncompleteSizeAttr {
	return func(m optionalAttr) {
		m["shared_name"] = value
	}
}

// Op returns the number of incomplete elements in the underlying container.
func MapIncompleteSize(scope *Scope, dtypes []tf.DataType, optional ...MapIncompleteSizeAttr) (size tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"dtypes": dtypes}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "MapIncompleteSize",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// MapSizeAttr is an optional argument to MapSize.
type MapSizeAttr func(optionalAttr)

// MapSizeCapacity sets the optional capacity attribute to value.
// If not specified, defaults to 0
//
// REQUIRES: value >= 0
func MapSizeCapacity(value int64) MapSizeAttr {
	return func(m optionalAttr) {
		m["capacity"] = value
	}
}

// MapSizeMemoryLimit sets the optional memory_limit attribute to value.
// If not specified, defaults to 0
//
// REQUIRES: value >= 0
func MapSizeMemoryLimit(value int64) MapSizeAttr {
	return func(m optionalAttr) {
		m["memory_limit"] = value
	}
}

// MapSizeContainer sets the optional container attribute to value.
// If not specified, defaults to ""
func MapSizeContainer(value string) MapSizeAttr {
	return func(m optionalAttr) {
		m["container"] = value
	}
}

// MapSizeSharedName sets the optional shared_name attribute to value.
// If not specified, defaults to ""
func MapSizeSharedName(value string) MapSizeAttr {
	return func(m optionalAttr) {
		m["shared_name"] = value
	}
}

// Op returns the number of elements in the underlying container.
func MapSize(scope *Scope, dtypes []tf.DataType, optional ...MapSizeAttr) (size tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"dtypes": dtypes}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "MapSize",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// MapPeekAttr is an optional argument to MapPeek.
type MapPeekAttr func(optionalAttr)

// MapPeekCapacity sets the optional capacity attribute to value.
// If not specified, defaults to 0
//
// REQUIRES: value >= 0
func MapPeekCapacity(value int64) MapPeekAttr {
	return func(m optionalAttr) {
		m["capacity"] = value
	}
}

// MapPeekMemoryLimit sets the optional memory_limit attribute to value.
// If not specified, defaults to 0
//
// REQUIRES: value >= 0
func MapPeekMemoryLimit(value int64) MapPeekAttr {
	return func(m optionalAttr) {
		m["memory_limit"] = value
	}
}

// MapPeekContainer sets the optional container attribute to value.
// If not specified, defaults to ""
func MapPeekContainer(value string) MapPeekAttr {
	return func(m optionalAttr) {
		m["container"] = value
	}
}

// MapPeekSharedName sets the optional shared_name attribute to value.
// If not specified, defaults to ""
func MapPeekSharedName(value string) MapPeekAttr {
	return func(m optionalAttr) {
		m["shared_name"] = value
	}
}

// Op peeks at the values at the specified key.  If the
//
// underlying container does not contain this key
// this op will block until it does.
func MapPeek(scope *Scope, key tf.Output, indices tf.Output, dtypes []tf.DataType, optional ...MapPeekAttr) (values []tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"dtypes": dtypes}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "MapPeek",
		Input: []tf.Input{
			key, indices,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	if scope.Err() != nil {
		return
	}
	var idx int
	var err error
	if values, idx, err = makeOutputList(op, idx, "values"); err != nil {
		scope.UpdateErr("MapPeek", err)
		return
	}
	return values
}

// MapStageAttr is an optional argument to MapStage.
type MapStageAttr func(optionalAttr)

// MapStageCapacity sets the optional capacity attribute to value.
//
// value: Maximum number of elements in the Staging Area. If > 0, inserts
// on the container will block when the capacity is reached.
// If not specified, defaults to 0
//
// REQUIRES: value >= 0
func MapStageCapacity(value int64) MapStageAttr {
	return func(m optionalAttr) {
		m["capacity"] = value
	}
}

// MapStageMemoryLimit sets the optional memory_limit attribute to value.
// If not specified, defaults to 0
//
// REQUIRES: value >= 0
func MapStageMemoryLimit(value int64) MapStageAttr {
	return func(m optionalAttr) {
		m["memory_limit"] = value
	}
}

// MapStageContainer sets the optional container attribute to value.
//
// value: If non-empty, this queue is placed in the given container. Otherwise,
// a default container is used.
// If not specified, defaults to ""
func MapStageContainer(value string) MapStageAttr {
	return func(m optionalAttr) {
		m["container"] = value
	}
}

// MapStageSharedName sets the optional shared_name attribute to value.
//
// value: It is necessary to match this name to the matching Unstage Op.
// If not specified, defaults to ""
func MapStageSharedName(value string) MapStageAttr {
	return func(m optionalAttr) {
		m["shared_name"] = value
	}
}

// Stage (key, values) in the underlying container which behaves like a hashtable.
//
// Arguments:
//	key: int64
//
//	values: a list of tensors
//	dtypes: A list of data types that inserted values should adhere to.
//
// Returns the created operation.
func MapStage(scope *Scope, key tf.Output, indices tf.Output, values []tf.Output, dtypes []tf.DataType, optional ...MapStageAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"dtypes": dtypes}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "MapStage",
		Input: []tf.Input{
			key, indices, tf.OutputList(values),
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}
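
// A minimal sketch pairing MapStage with MapPeek (editorial, not generated
// code). Matching shared_name values make both ops address the same
// underlying container; "shared_map" is an illustrative name:
//
//	s := NewScope()
//	key := Placeholder(s, tf.Int64)
//	indices := Placeholder(s, tf.Int32)
//	val := Placeholder(s, tf.Float)
//	dtypes := []tf.DataType{tf.Float}
//	put := MapStage(s, key, indices, []tf.Output{val}, dtypes,
//		MapStageSharedName("shared_map"))
//	peeked := MapPeek(s.SubScope("peek"), key, indices, dtypes,
//		MapPeekSharedName("shared_map"))
//	// Run `put` as a target to insert, then fetch peeked[0] to read the
//	// value without removing it.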

// StageClearAttr is an optional argument to StageClear.
type StageClearAttr func(optionalAttr)

// StageClearCapacity sets the optional capacity attribute to value.
// If not specified, defaults to 0
//
// REQUIRES: value >= 0
func StageClearCapacity(value int64) StageClearAttr {
	return func(m optionalAttr) {
		m["capacity"] = value
	}
}

// StageClearMemoryLimit sets the optional memory_limit attribute to value.
// If not specified, defaults to 0
//
// REQUIRES: value >= 0
func StageClearMemoryLimit(value int64) StageClearAttr {
	return func(m optionalAttr) {
		m["memory_limit"] = value
	}
}

// StageClearContainer sets the optional container attribute to value.
// If not specified, defaults to ""
func StageClearContainer(value string) StageClearAttr {
	return func(m optionalAttr) {
		m["container"] = value
	}
}

// StageClearSharedName sets the optional shared_name attribute to value.
// If not specified, defaults to ""
func StageClearSharedName(value string) StageClearAttr {
	return func(m optionalAttr) {
		m["shared_name"] = value
	}
}

// Op removes all elements in the underlying container.
//
// Returns the created operation.
func StageClear(scope *Scope, dtypes []tf.DataType, optional ...StageClearAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"dtypes": dtypes}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "StageClear",

		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}

// StageSizeAttr is an optional argument to StageSize.
type StageSizeAttr func(optionalAttr)

// StageSizeCapacity sets the optional capacity attribute to value.
// If not specified, defaults to 0
//
// REQUIRES: value >= 0
func StageSizeCapacity(value int64) StageSizeAttr {
	return func(m optionalAttr) {
		m["capacity"] = value
	}
}

// StageSizeMemoryLimit sets the optional memory_limit attribute to value.
// If not specified, defaults to 0
//
// REQUIRES: value >= 0
func StageSizeMemoryLimit(value int64) StageSizeAttr {
	return func(m optionalAttr) {
		m["memory_limit"] = value
	}
}

// StageSizeContainer sets the optional container attribute to value.
// If not specified, defaults to ""
func StageSizeContainer(value string) StageSizeAttr {
	return func(m optionalAttr) {
		m["container"] = value
	}
}

// StageSizeSharedName sets the optional shared_name attribute to value.
// If not specified, defaults to ""
func StageSizeSharedName(value string) StageSizeAttr {
	return func(m optionalAttr) {
		m["shared_name"] = value
	}
}

// Op returns the number of elements in the underlying container.
func StageSize(scope *Scope, dtypes []tf.DataType, optional ...StageSizeAttr) (size tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"dtypes": dtypes}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "StageSize",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// StagePeekAttr is an optional argument to StagePeek.
type StagePeekAttr func(optionalAttr)

// StagePeekCapacity sets the optional capacity attribute to value.
// If not specified, defaults to 0
//
// REQUIRES: value >= 0
func StagePeekCapacity(value int64) StagePeekAttr {
	return func(m optionalAttr) {
		m["capacity"] = value
	}
}

// StagePeekMemoryLimit sets the optional memory_limit attribute to value.
// If not specified, defaults to 0
//
// REQUIRES: value >= 0
func StagePeekMemoryLimit(value int64) StagePeekAttr {
	return func(m optionalAttr) {
		m["memory_limit"] = value
	}
}

// StagePeekContainer sets the optional container attribute to value.
// If not specified, defaults to ""
func StagePeekContainer(value string) StagePeekAttr {
	return func(m optionalAttr) {
		m["container"] = value
	}
}

// StagePeekSharedName sets the optional shared_name attribute to value.
// If not specified, defaults to ""
func StagePeekSharedName(value string) StagePeekAttr {
	return func(m optionalAttr) {
		m["shared_name"] = value
	}
}

// Op peeks at the values at the specified index.  If the
//
// underlying container does not contain sufficient elements
// this op will block until it does.   This Op is optimized for
// performance.
func StagePeek(scope *Scope, index tf.Output, dtypes []tf.DataType, optional ...StagePeekAttr) (values []tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"dtypes": dtypes}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "StagePeek",
		Input: []tf.Input{
			index,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	if scope.Err() != nil {
		return
	}
	var idx int
	var err error
	if values, idx, err = makeOutputList(op, idx, "values"); err != nil {
		scope.UpdateErr("StagePeek", err)
		return
	}
	return values
}

// UnstageAttr is an optional argument to Unstage.
type UnstageAttr func(optionalAttr)

// UnstageCapacity sets the optional capacity attribute to value.
// If not specified, defaults to 0
//
// REQUIRES: value >= 0
func UnstageCapacity(value int64) UnstageAttr {
	return func(m optionalAttr) {
		m["capacity"] = value
	}
}

// UnstageMemoryLimit sets the optional memory_limit attribute to value.
// If not specified, defaults to 0
//
// REQUIRES: value >= 0
func UnstageMemoryLimit(value int64) UnstageAttr {
	return func(m optionalAttr) {
		m["memory_limit"] = value
	}
}

// UnstageContainer sets the optional container attribute to value.
// If not specified, defaults to ""
func UnstageContainer(value string) UnstageAttr {
	return func(m optionalAttr) {
		m["container"] = value
	}
}

// UnstageSharedName sets the optional shared_name attribute to value.
// If not specified, defaults to ""
func UnstageSharedName(value string) UnstageAttr {
	return func(m optionalAttr) {
		m["shared_name"] = value
	}
}

// Op is similar to a lightweight Dequeue.
//
// The basic functionality is similar to dequeue with many fewer
// capabilities and options.  This Op is optimized for performance.
func Unstage(scope *Scope, dtypes []tf.DataType, optional ...UnstageAttr) (values []tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"dtypes": dtypes}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "Unstage",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	if scope.Err() != nil {
		return
	}
	var idx int
	var err error
	if values, idx, err = makeOutputList(op, idx, "values"); err != nil {
		scope.UpdateErr("Unstage", err)
		return
	}
	return values
}

// StageAttr is an optional argument to Stage.
type StageAttr func(optionalAttr)

// StageCapacity sets the optional capacity attribute to value.
//
// value: Maximum number of elements in the Staging Area. If > 0, inserts
// on the container will block when the capacity is reached.
// If not specified, defaults to 0
//
// REQUIRES: value >= 0
func StageCapacity(value int64) StageAttr {
	return func(m optionalAttr) {
		m["capacity"] = value
	}
}

// StageMemoryLimit sets the optional memory_limit attribute to value.
//
// value: The maximum number of bytes allowed for Tensors in the Staging Area.
// If > 0, inserts will block until sufficient space is available.
// If not specified, defaults to 0
//
// REQUIRES: value >= 0
func StageMemoryLimit(value int64) StageAttr {
	return func(m optionalAttr) {
		m["memory_limit"] = value
	}
}

// StageContainer sets the optional container attribute to value.
//
// value: If non-empty, this queue is placed in the given container. Otherwise,
// a default container is used.
// If not specified, defaults to ""
func StageContainer(value string) StageAttr {
	return func(m optionalAttr) {
		m["container"] = value
	}
}

// StageSharedName sets the optional shared_name attribute to value.
//
// value: It is necessary to match this name to the matching Unstage Op.
// If not specified, defaults to ""
func StageSharedName(value string) StageAttr {
	return func(m optionalAttr) {
		m["shared_name"] = value
	}
}

// Stage values similar to a lightweight Enqueue.
//
// The basic functionality of this Op is similar to a queue with many
// fewer capabilities and options.  This Op is optimized for performance.
//
// Arguments:
//	values: a list of tensors
//	dtypes: A list of data types that inserted values should adhere to.
//
// Returns the created operation.
func Stage(scope *Scope, values []tf.Output, optional ...StageAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "Stage",
		Input: []tf.Input{
			tf.OutputList(values),
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}
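
// A minimal sketch pairing Stage with Unstage (editorial, not generated code).
// The shared_name "buf" is illustrative; matching attributes make both ops
// refer to the same staging area:
//
//	s := NewScope()
//	v := Placeholder(s, tf.Float)
//	put := Stage(s, []tf.Output{v},
//		StageSharedName("buf"), StageCapacity(10))
//	got := Unstage(s.SubScope("get"), []tf.DataType{tf.Float},
//		UnstageSharedName("buf"), UnstageCapacity(10))
//	// Run `put` as a target to insert v; fetch got[0] to remove and return
//	// the oldest staged value.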

// Delete the tensor specified by its handle in the session.
//
// Arguments:
//	handle: The handle for a tensor stored in the session state.
//
// Returns the created operation.
func DeleteSessionTensor(scope *Scope, handle tf.Output) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "DeleteSessionTensor",
		Input: []tf.Input{
			handle,
		},
	}
	return scope.AddOperation(opspec)
}

// Store the input tensor in the state of the current session.
//
// Arguments:
//	value: The tensor to be stored.
//
// Returns The handle for the tensor stored in the session state, represented
// as a string.
func GetSessionHandle(scope *Scope, value tf.Output) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "GetSessionHandle",
		Input: []tf.Input{
			value,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
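
// A minimal sketch of the session-state handle lifecycle (editorial, not
// generated code). GetSessionTensor, assumed here to be the companion op that
// reads the stored value back, is defined elsewhere in this package:
//
//	s := NewScope()
//	v := Const(s, []float32{1, 2, 3})
//	handle := GetSessionHandle(s, v)
//	// Fetching `handle` at Session.Run time yields a string naming the
//	// stored tensor; feed that string to GetSessionTensor to read it back,
//	// and to DeleteSessionTensor (above) when it is no longer needed.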

// Deprecated. Use TensorArraySizeV3
//
// DEPRECATED at GraphDef version 26: Use TensorArraySizeV3
func TensorArraySizeV2(scope *Scope, handle tf.Output, flow_in tf.Output) (size tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "TensorArraySizeV2",
		Input: []tf.Input{
			handle, flow_in,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// TensorArrayConcatV2Attr is an optional argument to TensorArrayConcatV2.
type TensorArrayConcatV2Attr func(optionalAttr)

// TensorArrayConcatV2ElementShapeExcept0 sets the optional element_shape_except0 attribute to value.
// If not specified, defaults to <unknown_rank:true >
func TensorArrayConcatV2ElementShapeExcept0(value tf.Shape) TensorArrayConcatV2Attr {
	return func(m optionalAttr) {
		m["element_shape_except0"] = value
	}
}

// Deprecated. Use TensorArrayConcatV3
func TensorArrayConcatV2(scope *Scope, handle tf.Output, flow_in tf.Output, dtype tf.DataType, optional ...TensorArrayConcatV2Attr) (value tf.Output, lengths tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"dtype": dtype}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "TensorArrayConcatV2",
		Input: []tf.Input{
			handle, flow_in,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1)
}

6201// Deprecated. Use TensorArrayGradV3
6202//
6203// DEPRECATED at GraphDef version 26: Use TensorArrayWriteV3
6204func TensorArrayWriteV2(scope *Scope, handle tf.Output, index tf.Output, value tf.Output, flow_in tf.Output) (flow_out tf.Output) {
6205	if scope.Err() != nil {
6206		return
6207	}
6208	opspec := tf.OpSpec{
6209		Type: "TensorArrayWriteV2",
6210		Input: []tf.Input{
6211			handle, index, value, flow_in,
6212		},
6213	}
6214	op := scope.AddOperation(opspec)
6215	return op.Output(0)
6216}

// Deprecated. Use TensorArrayGradV3
//
// DEPRECATED at GraphDef version 26: Use TensorArrayGradV3
func TensorArrayGradV2(scope *Scope, handle tf.Output, flow_in tf.Output, source string) (grad_handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"source": source}
	opspec := tf.OpSpec{
		Type: "TensorArrayGradV2",
		Input: []tf.Input{
			handle, flow_in,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// TensorArrayV2Attr is an optional argument to TensorArrayV2.
type TensorArrayV2Attr func(optionalAttr)

// TensorArrayV2ElementShape sets the optional element_shape attribute to value.
// If not specified, defaults to <unknown_rank:true >
func TensorArrayV2ElementShape(value tf.Shape) TensorArrayV2Attr {
	return func(m optionalAttr) {
		m["element_shape"] = value
	}
}

// TensorArrayV2DynamicSize sets the optional dynamic_size attribute to value.
// If not specified, defaults to false
func TensorArrayV2DynamicSize(value bool) TensorArrayV2Attr {
	return func(m optionalAttr) {
		m["dynamic_size"] = value
	}
}

// TensorArrayV2ClearAfterRead sets the optional clear_after_read attribute to value.
// If not specified, defaults to true
func TensorArrayV2ClearAfterRead(value bool) TensorArrayV2Attr {
	return func(m optionalAttr) {
		m["clear_after_read"] = value
	}
}

// TensorArrayV2TensorArrayName sets the optional tensor_array_name attribute to value.
// If not specified, defaults to ""
func TensorArrayV2TensorArrayName(value string) TensorArrayV2Attr {
	return func(m optionalAttr) {
		m["tensor_array_name"] = value
	}
}

// Deprecated. Use TensorArrayV3
//
// DEPRECATED at GraphDef version 26: Use TensorArrayV3
func TensorArrayV2(scope *Scope, size tf.Output, dtype tf.DataType, optional ...TensorArrayV2Attr) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"dtype": dtype}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "TensorArrayV2",
		Input: []tf.Input{
			size,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Split the data from the input value into TensorArray elements.
//
// Assuming that `lengths` takes on values
//
//   ```(n0, n1, ..., n(T-1))```
//
// and that `value` has shape
//
//   ```((n0 + n1 + ... + n(T-1)) x d0 x d1 x ...)```,
//
// this splits `value` into a TensorArray with T tensors.
//
// TensorArray index t will be the subtensor of `value` with starting position
//
//   ```(n0 + n1 + ... + n(t-1), 0, 0, ...)```
//
// and having size
//
//   ```nt x d0 x d1 x ...```
//
// Arguments:
//	handle: The handle to a TensorArray.
//	value: The concatenated tensor to write to the TensorArray.
//	lengths: The vector of lengths specifying how to split the rows of `value`
// into the TensorArray.
//	flow_in: A float scalar that enforces proper chaining of operations.
//
// Returns A float scalar that enforces proper chaining of operations.
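//
// A construction sketch (assumes a scope `s` plus `taHandle` and `flow` from
// an earlier TensorArrayV3 call):
//
// ```
// // value has shape (2 + 1) x 2; lengths = (2, 1).
// value := op.Const(s, [][]float32{{1, 2}, {3, 4}, {5, 6}})
// lengths := op.Const(s, []int64{2, 1})
// flowOut := op.TensorArraySplitV3(s, taHandle, value, lengths, flow)
// _ = flowOut // element 0 holds rows 0-1; element 1 holds row 2
// ```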
func TensorArraySplitV3(scope *Scope, handle tf.Output, value tf.Output, lengths tf.Output, flow_in tf.Output) (flow_out tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "TensorArraySplitV3",
		Input: []tf.Input{
			handle, value, lengths, flow_in,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// EmptyAttr is an optional argument to Empty.
type EmptyAttr func(optionalAttr)

// EmptyInit sets the optional init attribute to value.
//
// value: If True, initialize the returned tensor with the default value of dtype. Otherwise, the implementation is free not to initialize the tensor's content.
// If not specified, defaults to false
func EmptyInit(value bool) EmptyAttr {
	return func(m optionalAttr) {
		m["init"] = value
	}
}

// Creates a tensor with the given shape.
//
// This operation creates a tensor of `shape` and `dtype`.
//
// Arguments:
//	shape: 1-D. Represents the shape of the output tensor.
//	dtype: The element type of the returned tensor.
//
// Returns A `Tensor` of type `T`.
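//
// A minimal sketch creating a 2x3 float tensor (assumes a scope `s`; with
// init set, the result is filled with zeros):
//
// ```
// shape := op.Const(s, []int32{2, 3})
// out := op.Empty(s, shape, tf.Float, op.EmptyInit(true))
// _ = out
// ```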
func Empty(scope *Scope, shape tf.Output, dtype tf.DataType, optional ...EmptyAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"dtype": dtype}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "Empty",
		Input: []tf.Input{
			shape,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// TensorArrayConcatV3Attr is an optional argument to TensorArrayConcatV3.
type TensorArrayConcatV3Attr func(optionalAttr)

// TensorArrayConcatV3ElementShapeExcept0 sets the optional element_shape_except0 attribute to value.
//
// value: The expected shape of an element, if known,
// excluding the first dimension. Used to validate the shapes of
// TensorArray elements. If this shape is not fully specified, concatenating
// zero-size TensorArrays is an error.
// If not specified, defaults to <unknown_rank:true >
func TensorArrayConcatV3ElementShapeExcept0(value tf.Shape) TensorArrayConcatV3Attr {
	return func(m optionalAttr) {
		m["element_shape_except0"] = value
	}
}

// Concat the elements from the TensorArray into output `value`.
//
// Takes `T` elements of shapes
//
//   ```
//   (n0 x d0 x d1 x ...), (n1 x d0 x d1 x ...), ..., (n(T-1) x d0 x d1 x ...)
//   ```
//
// and concatenates them into a Tensor of shape:
//
//   ```((n0 + n1 + ... + n(T-1)) x d0 x d1 x ...)```
//
// All elements must have the same shape (excepting the first dimension).
//
// Arguments:
//	handle: The handle to a TensorArray.
//	flow_in: A float scalar that enforces proper chaining of operations.
//	dtype: The type of the elements that are returned.
//
// Returns All of the elements in the TensorArray, concatenated along the
// first axis, and a vector of the row sizes of the original T elements in
// the value output. In the example above, the row sizes would be the values
// `(n0, n1, ..., n(T-1))`.
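//
// A construction sketch (assumes a scope `s` plus `taHandle` and `flow` from
// a float TensorArray whose elements are rows of width 2):
//
// ```
// value, lengths := op.TensorArrayConcatV3(s, taHandle, flow, tf.Float,
//	op.TensorArrayConcatV3ElementShapeExcept0(tf.MakeShape(2)))
// _, _ = value, lengths // value: all rows stacked; lengths: rows per element
// ```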
func TensorArrayConcatV3(scope *Scope, handle tf.Output, flow_in tf.Output, dtype tf.DataType, optional ...TensorArrayConcatV3Attr) (value tf.Output, lengths tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"dtype": dtype}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "TensorArrayConcatV3",
		Input: []tf.Input{
			handle, flow_in,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1)
}

// Scatter the data from the input value into specific TensorArray elements.
//
// `indices` must be a vector, and its length must match the first dimension of `value`.
//
// Arguments:
//	handle: The handle to a TensorArray.
//	indices: The locations at which to write the tensor elements.
//	value: The concatenated tensor to write to the TensorArray.
//	flow_in: A float scalar that enforces proper chaining of operations.
//
// Returns A float scalar that enforces proper chaining of operations.
func TensorArrayScatterV3(scope *Scope, handle tf.Output, indices tf.Output, value tf.Output, flow_in tf.Output) (flow_out tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "TensorArrayScatterV3",
		Input: []tf.Input{
			handle, indices, value, flow_in,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// TensorArrayGatherV3Attr is an optional argument to TensorArrayGatherV3.
type TensorArrayGatherV3Attr func(optionalAttr)

// TensorArrayGatherV3ElementShape sets the optional element_shape attribute to value.
//
// value: The expected shape of an element, if known. Used to
// validate the shapes of TensorArray elements. If this shape is not
// fully specified, gathering zero-size TensorArrays is an error.
// If not specified, defaults to <unknown_rank:true >
func TensorArrayGatherV3ElementShape(value tf.Shape) TensorArrayGatherV3Attr {
	return func(m optionalAttr) {
		m["element_shape"] = value
	}
}

// Gather specific elements from the TensorArray into output `value`.
//
// All elements selected by `indices` must have the same shape.
//
// Arguments:
//	handle: The handle to a TensorArray.
//	indices: The locations in the TensorArray from which to read tensor elements.
//	flow_in: A float scalar that enforces proper chaining of operations.
//	dtype: The type of the elements that are returned.
//
// Returns All of the elements in the TensorArray, concatenated along a new
// axis (the new dimension 0).
func TensorArrayGatherV3(scope *Scope, handle tf.Output, indices tf.Output, flow_in tf.Output, dtype tf.DataType, optional ...TensorArrayGatherV3Attr) (value tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"dtype": dtype}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "TensorArrayGatherV3",
		Input: []tf.Input{
			handle, indices, flow_in,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Creates a TensorArray for storing multiple gradients of values in the given handle.
//
// Similar to TensorArrayGradV3. However, it creates an accumulator with an
// expanded shape compared to the input TensorArray whose gradient is being
// computed. This enables multiple gradients for the same TensorArray to be
// calculated using the same accumulator.
//
// Arguments:
//	handle: The handle to the forward TensorArray.
//	flow_in: A float scalar that enforces proper chaining of operations.
//	shape_to_prepend: An int32 vector representing a shape. Elements in the gradient accumulator will
// have a shape that is this shape_to_prepend value concatenated with the shape of the
// elements in the TensorArray corresponding to the input handle.
//	source: The gradient source string, used to decide which gradient TensorArray
// to return.
func TensorArrayGradWithShape(scope *Scope, handle tf.Output, flow_in tf.Output, shape_to_prepend tf.Output, source string) (grad_handle tf.Output, flow_out tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"source": source}
	opspec := tf.OpSpec{
		Type: "TensorArrayGradWithShape",
		Input: []tf.Input{
			handle, flow_in, shape_to_prepend,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1)
}

// Creates a TensorArray for storing the gradients of values in the given handle.
//
// If the given TensorArray gradient already exists, returns a reference to it.
//
// Locks the size of the original TensorArray by disabling its dynamic size flag.
//
// **A note about the input flow_in:**
//
// The handle flow_in forces the execution of the gradient lookup to occur
// only after certain other operations have occurred.  For example, when
// the forward TensorArray is dynamically sized, writes to this TensorArray
// may resize the object.  The gradient TensorArray is statically sized based
// on the size of the forward TensorArray when this operation executes.
// Furthermore, the size of the forward TensorArray is frozen by this call.
// As a result, the flow is used to ensure that the call to generate the gradient
// TensorArray only happens after all writes are executed.
//
// In the case of dynamically sized TensorArrays, gradient computation should
// only be performed on read operations that have themselves been chained via
// flow to occur only after all writes have executed. That way the final size
// of the forward TensorArray is known when this operation is called.
//
// **A note about the source attribute:**
//
// TensorArray gradient calls use an accumulator TensorArray object.  If
// multiple gradients are calculated and run in the same session, the multiple
// gradient nodes may accidentally flow through the same accumulator TensorArray.
// This double-counts gradients and generally breaks the TensorArray gradient flow.
//
// The solution is to identify which gradient call this particular
// TensorArray gradient is being called in.  This is performed by identifying
// a unique string (e.g. "gradients", "gradients_1", ...) from the input
// gradient Tensor's name.  This string is used as a suffix when creating
// the TensorArray gradient object here (the attribute `source`).
//
// The attribute `source` is added as a suffix to the forward TensorArray's
// name when performing the creation / lookup, so that each separate gradient
// calculation gets its own TensorArray accumulator.
//
// Arguments:
//	handle: The handle to the forward TensorArray.
//	flow_in: A float scalar that enforces proper chaining of operations.
//	source: The gradient source string, used to decide which gradient TensorArray
// to return.
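//
// A sketch of the source-string convention described above (assumes a scope
// `s` plus `taHandle` and `flow` from a forward TensorArray):
//
// ```
// // Two distinct gradient computations pass distinct source suffixes, so
// // each call below gets its own independent accumulator.
// g0, f0 := op.TensorArrayGradV3(s, taHandle, flow, "gradients")
// g1, f1 := op.TensorArrayGradV3(s, taHandle, flow, "gradients_1")
// _, _, _, _ = g0, f0, g1, f1
// ```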
func TensorArrayGradV3(scope *Scope, handle tf.Output, flow_in tf.Output, source string) (grad_handle tf.Output, flow_out tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"source": source}
	opspec := tf.OpSpec{
		Type: "TensorArrayGradV3",
		Input: []tf.Input{
			handle, flow_in,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1)
}

// Pop the element at the top of the stack.
//
// Arguments:
//	handle: The handle to a stack.
//	elem_type: The type of the element that is popped.
//
// Returns The tensor that is popped from the top of the stack.
func StackPopV2(scope *Scope, handle tf.Output, elem_type tf.DataType) (elem tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"elem_type": elem_type}
	opspec := tf.OpSpec{
		Type: "StackPopV2",
		Input: []tf.Input{
			handle,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// OneHotAttr is an optional argument to OneHot.
type OneHotAttr func(optionalAttr)

// OneHotAxis sets the optional axis attribute to value.
//
// value: The axis to fill (default: -1, a new inner-most axis).
// If not specified, defaults to -1
func OneHotAxis(value int64) OneHotAttr {
	return func(m optionalAttr) {
		m["axis"] = value
	}
}

// Returns a one-hot tensor.
//
// The locations represented by indices in `indices` take value `on_value`,
// while all other locations take value `off_value`.
//
// If the input `indices` is rank `N`, the output will have rank `N+1`.
// The new axis is created at dimension `axis` (default: the new axis is
// appended at the end).
//
// If `indices` is a scalar the output shape will be a vector of length `depth`.
//
// If `indices` is a vector of length `features`, the output shape will be:
// ```
//   features x depth if axis == -1
//   depth x features if axis == 0
// ```
//
// If `indices` is a matrix (batch) with shape `[batch, features]`,
// the output shape will be:
// ```
//   batch x features x depth if axis == -1
//   batch x depth x features if axis == 1
//   depth x batch x features if axis == 0
// ```
//
//
// Examples
// =========
//
// Suppose that
// ```
//   indices = [0, 2, -1, 1]
//   depth = 3
//   on_value = 5.0
//   off_value = 0.0
//   axis = -1
// ```
//
// Then output is `[4 x 3]`:
// ```
// output =
//   [5.0 0.0 0.0]  // one_hot(0)
//   [0.0 0.0 5.0]  // one_hot(2)
//   [0.0 0.0 0.0]  // one_hot(-1)
//   [0.0 5.0 0.0]  // one_hot(1)
// ```
//
// Suppose that
// ```
//   indices = [0, 2, -1, 1]
//   depth = 3
//   on_value = 0.0
//   off_value = 3.0
//   axis = 0
// ```
//
// Then output is `[3 x 4]`:
// ```
// output =
//   [0.0 3.0 3.0 3.0]
//   [3.0 3.0 3.0 0.0]
//   [3.0 0.0 3.0 3.0]
// //  ^                one_hot(0)
// //      ^            one_hot(2)
// //          ^        one_hot(-1)
// //              ^    one_hot(1)
// ```
//
// Suppose that
// ```
//   indices = [[0, 2], [1, -1]]
//   depth = 3
//   on_value = 1.0
//   off_value = 0.0
//   axis = -1
// ```
//
// Then output is `[2 x 2 x 3]`:
// ```
// output =
//   [
//     [1.0, 0.0, 0.0]  // one_hot(0)
//     [0.0, 0.0, 1.0]  // one_hot(2)
//   ][
//     [0.0, 1.0, 0.0]  // one_hot(1)
//     [0.0, 0.0, 0.0]  // one_hot(-1)
//   ]
// ```
//
// Arguments:
//	indices: A tensor of indices.
//	depth: A scalar defining the depth of the one hot dimension.
//	on_value: A scalar defining the value to fill in output when `indices[j] = i`.
//	off_value: A scalar defining the value to fill in output when `indices[j] != i`.
//
// Returns The one-hot tensor.
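//
// A Go construction sketch mirroring the first example above (assumes a
// scope `s`):
//
// ```
// indices := op.Const(s, []int64{0, 2, -1, 1})
// depth := op.Const(s, int32(3))
// on := op.Const(s, float32(5.0))
// off := op.Const(s, float32(0.0))
// out := op.OneHot(s, indices, depth, on, off, op.OneHotAxis(-1))
// _ = out // evaluates to the 4x3 matrix shown in the first example
// ```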
func OneHot(scope *Scope, indices tf.Output, depth tf.Output, on_value tf.Output, off_value tf.Output, optional ...OneHotAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "OneHot",
		Input: []tf.Input{
			indices, depth, on_value, off_value,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Computes the number of elements in the given queue.
//
// Arguments:
//	handle: The handle to a queue.
//
// Returns The number of elements in the given queue.
func QueueSizeV2(scope *Scope, handle tf.Output) (size tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "QueueSizeV2",
		Input: []tf.Input{
			handle,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// QueueDequeueManyV2Attr is an optional argument to QueueDequeueManyV2.
type QueueDequeueManyV2Attr func(optionalAttr)

// QueueDequeueManyV2TimeoutMs sets the optional timeout_ms attribute to value.
//
// value: If the queue has fewer than n elements, this operation
// will block for up to timeout_ms milliseconds.
// Note: This option is not supported yet.
// If not specified, defaults to -1
func QueueDequeueManyV2TimeoutMs(value int64) QueueDequeueManyV2Attr {
	return func(m optionalAttr) {
		m["timeout_ms"] = value
	}
}

// Dequeues `n` tuples of one or more tensors from the given queue.
//
// If the queue is closed and there are fewer than `n` elements, then an
// OutOfRange error is returned.
//
// This operation concatenates queue-element component tensors along the
// 0th dimension to make a single component tensor.  All of the components
// in the dequeued tuple will have size `n` in the 0th dimension.
//
// This operation has `k` outputs, where `k` is the number of components in
// the tuples stored in the given queue, and output `i` is the ith
// component of the dequeued tuple.
//
// N.B. If the queue is empty, this operation will block until `n` elements
// have been dequeued (or 'timeout_ms' elapses, if specified).
//
// Arguments:
//	handle: The handle to a queue.
//	n: The number of tuples to dequeue.
//	component_types: The type of each component in a tuple.
//
// Returns One or more tensors that were dequeued as a tuple.
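//
// A construction sketch (assumes a scope `s` and `queue`, the handle of a
// queue holding single-component float tuples):
//
// ```
// n := op.Const(s, int32(4))
// components := op.QueueDequeueManyV2(s, queue, n, []tf.DataType{tf.Float})
// batch := components[0] // shape: 4 x <element shape>
// _ = batch
// ```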
func QueueDequeueManyV2(scope *Scope, handle tf.Output, n tf.Output, component_types []tf.DataType, optional ...QueueDequeueManyV2Attr) (components []tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"component_types": component_types}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "QueueDequeueManyV2",
		Input: []tf.Input{
			handle, n,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	if scope.Err() != nil {
		return
	}
	var idx int
	var err error
	if components, idx, err = makeOutputList(op, idx, "components"); err != nil {
		scope.UpdateErr("QueueDequeueManyV2", err)
		return
	}
	return components
}

// QuantizeAndDequantizeAttr is an optional argument to QuantizeAndDequantize.
type QuantizeAndDequantizeAttr func(optionalAttr)

// QuantizeAndDequantizeSignedInput sets the optional signed_input attribute to value.
// If not specified, defaults to true
func QuantizeAndDequantizeSignedInput(value bool) QuantizeAndDequantizeAttr {
	return func(m optionalAttr) {
		m["signed_input"] = value
	}
}

// QuantizeAndDequantizeNumBits sets the optional num_bits attribute to value.
// If not specified, defaults to 8
func QuantizeAndDequantizeNumBits(value int64) QuantizeAndDequantizeAttr {
	return func(m optionalAttr) {
		m["num_bits"] = value
	}
}

// QuantizeAndDequantizeRangeGiven sets the optional range_given attribute to value.
// If not specified, defaults to false
func QuantizeAndDequantizeRangeGiven(value bool) QuantizeAndDequantizeAttr {
	return func(m optionalAttr) {
		m["range_given"] = value
	}
}

// QuantizeAndDequantizeInputMin sets the optional input_min attribute to value.
// If not specified, defaults to 0
func QuantizeAndDequantizeInputMin(value float32) QuantizeAndDequantizeAttr {
	return func(m optionalAttr) {
		m["input_min"] = value
	}
}

// QuantizeAndDequantizeInputMax sets the optional input_max attribute to value.
// If not specified, defaults to 0
func QuantizeAndDequantizeInputMax(value float32) QuantizeAndDequantizeAttr {
	return func(m optionalAttr) {
		m["input_max"] = value
	}
}

// Use QuantizeAndDequantizeV2 instead.
//
// DEPRECATED at GraphDef version 22: Replaced by QuantizeAndDequantizeV2
func QuantizeAndDequantize(scope *Scope, input tf.Output, optional ...QuantizeAndDequantizeAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "QuantizeAndDequantize",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Returns locations of nonzero / true values in a tensor.
//
// This operation returns the coordinates of true elements in `condition`. The
// coordinates are returned in a 2-D tensor where the first dimension (rows)
// represents the number of true elements, and the second dimension (columns)
// represents the coordinates of the true elements. Keep in mind, the shape of
// the output tensor can vary depending on how many true values there are in
// `condition`. Indices are output in row-major order.
//
// For example:
//
// ```
// # 'input' tensor is [[True, False]
// #                    [True, False]]
// # 'input' has two true values, so output has two coordinates.
// # 'input' has rank of 2, so coordinates have two indices.
// where(input) ==> [[0, 0],
//                   [1, 0]]
//
// # `condition` tensor is [[[True, False]
// #                         [True, False]]
// #                        [[False, True]
// #                         [False, True]]
// #                        [[False, False]
// #                         [False, True]]]
// # 'input' has 5 true values, so output has 5 coordinates.
// # 'input' has rank of 3, so coordinates have three indices.
// where(input) ==> [[0, 0, 0],
//                   [0, 1, 0],
//                   [1, 0, 1],
//                   [1, 1, 1],
//                   [2, 1, 1]]
//
// # `condition` tensor is [[[1.5,  0.0]
// #                         [-0.5, 0.0]]
// #                        [[0.0,  0.25]
// #                         [0.0,  0.75]]
// #                        [[0.0,  0.0]
// #                         [0.0,  0.01]]]
// # 'input' has 5 nonzero values, so output has 5 coordinates.
// # 'input' has rank of 3, so coordinates have three indices.
// where(input) ==> [[0, 0, 0],
//                   [0, 1, 0],
//                   [1, 0, 1],
//                   [1, 1, 1],
//                   [2, 1, 1]]
//
// # `condition` tensor is [[[1.5 + 0.0j, 0.0  + 0.0j]
// #                         [0.0 + 0.5j, 0.0  + 0.0j]]
// #                        [[0.0 + 0.0j, 0.25 + 1.5j]
// #                         [0.0 + 0.0j, 0.75 + 0.0j]]
// #                        [[0.0 + 0.0j, 0.0  + 0.0j]
// #                         [0.0 + 0.0j, 0.01 + 0.0j]]]
// # 'input' has 5 nonzero magnitude values, so output has 5 coordinates.
// # 'input' has rank of 3, so coordinates have three indices.
// where(input) ==> [[0, 0, 0],
//                   [0, 1, 0],
//                   [1, 0, 1],
//                   [1, 1, 1],
//                   [2, 1, 1]]
// ```
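//
// A Go construction sketch for the first example above (assumes a scope `s`):
//
// ```
// cond := op.Const(s, [][]bool{{true, false}, {true, false}})
// idx := op.Where(s, cond) // evaluates to [[0, 0], [1, 0]]
// _ = idx
// ```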
func Where(scope *Scope, condition tf.Output) (index tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Where",
		Input: []tf.Input{
			condition,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// QueueDequeueV2Attr is an optional argument to QueueDequeueV2.
type QueueDequeueV2Attr func(optionalAttr)

// QueueDequeueV2TimeoutMs sets the optional timeout_ms attribute to value.
//
// value: If the queue is empty, this operation will block for up to
// timeout_ms milliseconds.
// Note: This option is not supported yet.
// If not specified, defaults to -1
func QueueDequeueV2TimeoutMs(value int64) QueueDequeueV2Attr {
	return func(m optionalAttr) {
		m["timeout_ms"] = value
	}
}

// Dequeues a tuple of one or more tensors from the given queue.
//
// This operation has k outputs, where k is the number of components
// in the tuples stored in the given queue, and output i is the ith
// component of the dequeued tuple.
//
// N.B. If the queue is empty, this operation will block until an element
// has been dequeued (or 'timeout_ms' elapses, if specified).
//
// Arguments:
//	handle: The handle to a queue.
//	component_types: The type of each component in a tuple.
//
// Returns One or more tensors that were dequeued as a tuple.
func QueueDequeueV2(scope *Scope, handle tf.Output, component_types []tf.DataType, optional ...QueueDequeueV2Attr) (components []tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"component_types": component_types}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "QueueDequeueV2",
		Input: []tf.Input{
			handle,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	if scope.Err() != nil {
		return
	}
	var idx int
	var err error
	if components, idx, err = makeOutputList(op, idx, "components"); err != nil {
		scope.UpdateErr("QueueDequeueV2", err)
		return
	}
	return components
}

// QueueEnqueueV2Attr is an optional argument to QueueEnqueueV2.
type QueueEnqueueV2Attr func(optionalAttr)

// QueueEnqueueV2TimeoutMs sets the optional timeout_ms attribute to value.
//
// value: If the queue is full, this operation will block for up to
// timeout_ms milliseconds.
// Note: This option is not supported yet.
// If not specified, defaults to -1
func QueueEnqueueV2TimeoutMs(value int64) QueueEnqueueV2Attr {
	return func(m optionalAttr) {
		m["timeout_ms"] = value
	}
}

// Enqueues a tuple of one or more tensors in the given queue.
//
// The components input has k elements, which correspond to the components of
// tuples stored in the given queue.
//
// N.B. If the queue is full, this operation will block until the given
// element has been enqueued (or 'timeout_ms' elapses, if specified).
//
// Arguments:
//	handle: The handle to a queue.
//	components: One or more tensors from which the enqueued tensors should be taken.
//
// Returns the created operation.
func QueueEnqueueV2(scope *Scope, handle tf.Output, components []tf.Output, optional ...QueueEnqueueV2Attr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "QueueEnqueueV2",
		Input: []tf.Input{
			handle, tf.OutputList(components),
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}

// MfccAttr is an optional argument to Mfcc.
type MfccAttr func(optionalAttr)

// MfccUpperFrequencyLimit sets the optional upper_frequency_limit attribute to value.
//
// value: The highest frequency to use when calculating the
// cepstrum.
// If not specified, defaults to 4000
func MfccUpperFrequencyLimit(value float32) MfccAttr {
	return func(m optionalAttr) {
		m["upper_frequency_limit"] = value
	}
}

// MfccLowerFrequencyLimit sets the optional lower_frequency_limit attribute to value.
//
// value: The lowest frequency to use when calculating the
// cepstrum.
// If not specified, defaults to 20
func MfccLowerFrequencyLimit(value float32) MfccAttr {
	return func(m optionalAttr) {
		m["lower_frequency_limit"] = value
	}
}

// MfccFilterbankChannelCount sets the optional filterbank_channel_count attribute to value.
//
// value: Resolution of the Mel bank used internally.
// If not specified, defaults to 40
func MfccFilterbankChannelCount(value int64) MfccAttr {
	return func(m optionalAttr) {
		m["filterbank_channel_count"] = value
	}
}

// MfccDctCoefficientCount sets the optional dct_coefficient_count attribute to value.
//
// value: How many output channels to produce per time slice.
// If not specified, defaults to 13
func MfccDctCoefficientCount(value int64) MfccAttr {
	return func(m optionalAttr) {
		m["dct_coefficient_count"] = value
	}
}

// Transforms a spectrogram into a form that's useful for speech recognition.
//
// Mel Frequency Cepstral Coefficients are a way of representing audio data that
// has proven effective as an input feature for machine learning. They are created
// by taking the spectrum of a spectrogram (a 'cepstrum'), and discarding some of
// the higher frequencies that are less significant to the human ear. They have a
// long history in the speech recognition world, and
// https://en.wikipedia.org/wiki/Mel-frequency_cepstrum is a good resource to
// learn more.
//
// Arguments:
//	spectrogram: Typically produced by the Spectrogram op, with magnitude_squared
// set to true.
//	sample_rate: The number of samples per second of the source audio.
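//
// A construction sketch (assumes a scope `s` and `spec`, the output of a
// Spectrogram op; the sample rate is passed as a scalar tensor):
//
// ```
// rate := op.Const(s, int32(16000))
// out := op.Mfcc(s, spec, rate, op.MfccDctCoefficientCount(13))
// _ = out
// ```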
func Mfcc(scope *Scope, spectrogram tf.Output, sample_rate tf.Output, optional ...MfccAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "Mfcc",
		Input: []tf.Input{
			spectrogram, sample_rate,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// PaddingFIFOQueueV2Attr is an optional argument to PaddingFIFOQueueV2.
type PaddingFIFOQueueV2Attr func(optionalAttr)

// PaddingFIFOQueueV2Shapes sets the optional shapes attribute to value.
//
// value: The shape of each component in a value. The length of this attr must
// be either 0 or the same as the length of component_types.
// Shapes of fixed rank but variable size are allowed by setting
// any shape dimension to -1.  In this case, the inputs' shape may vary along
// the given dimension, and DequeueMany will pad the given dimension with
// zeros up to the maximum shape of all elements in the given batch.
// If the length of this attr is 0, different queue elements may have
// different ranks and shapes, but only one element may be dequeued at a time.
// If not specified, defaults to <>
//
// REQUIRES: len(value) >= 0
func PaddingFIFOQueueV2Shapes(value []tf.Shape) PaddingFIFOQueueV2Attr {
	return func(m optionalAttr) {
		m["shapes"] = value
	}
}

// PaddingFIFOQueueV2Capacity sets the optional capacity attribute to value.
//
// value: The upper bound on the number of elements in this queue.
// Negative numbers mean no limit.
// If not specified, defaults to -1
func PaddingFIFOQueueV2Capacity(value int64) PaddingFIFOQueueV2Attr {
	return func(m optionalAttr) {
		m["capacity"] = value
	}
}

// PaddingFIFOQueueV2Container sets the optional container attribute to value.
//
// value: If non-empty, this queue is placed in the given container.
// Otherwise, a default container is used.
// If not specified, defaults to ""
func PaddingFIFOQueueV2Container(value string) PaddingFIFOQueueV2Attr {
	return func(m optionalAttr) {
		m["container"] = value
	}
}

// PaddingFIFOQueueV2SharedName sets the optional shared_name attribute to value.
//
// value: If non-empty, this queue will be shared under the given name
// across multiple sessions.
// If not specified, defaults to ""
func PaddingFIFOQueueV2SharedName(value string) PaddingFIFOQueueV2Attr {
	return func(m optionalAttr) {
		m["shared_name"] = value
	}
}

// A queue that produces elements in first-in first-out order.
//
// Variable-size shapes are allowed by setting the corresponding shape dimensions
// to -1 in the shapes attr.  In this case DequeueMany will pad up to the maximum
// size of any given element in the minibatch.  See the `shapes` attribute above
// for details.
//
// Arguments:
//	component_types: The type of each component in a value.
//
// Returns The handle to the queue.
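//
// A construction sketch (assumes a scope `s`; the single float component has
// a variable-length first dimension, so DequeueMany pads each batch):
//
// ```
// q := op.PaddingFIFOQueueV2(s, []tf.DataType{tf.Float},
//	op.PaddingFIFOQueueV2Shapes([]tf.Shape{tf.MakeShape(-1)}),
//	op.PaddingFIFOQueueV2Capacity(32))
// _ = q
// ```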
func PaddingFIFOQueueV2(scope *Scope, component_types []tf.DataType, optional ...PaddingFIFOQueueV2Attr) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"component_types": component_types}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "PaddingFIFOQueueV2",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Interleave the values from the `data` tensors into a single tensor.
//
// Builds a merged tensor such that
//
// ```python
//     merged[indices[m][i, ..., j], ...] = data[m][i, ..., j, ...]
// ```
//
// For example, if each `indices[m]` is scalar or vector, we have
//
// ```python
//     # Scalar indices:
//     merged[indices[m], ...] = data[m][...]
//
//     # Vector indices:
//     merged[indices[m][i], ...] = data[m][i, ...]
// ```
//
// Each `data[i].shape` must start with the corresponding `indices[i].shape`,
// and the rest of `data[i].shape` must be constant w.r.t. `i`.  That is, we
// must have `data[i].shape = indices[i].shape + constant`.  In terms of this
// `constant`, the output shape is
//
//     merged.shape = [max(indices) + 1] + constant
//
// Values may be merged in parallel, so if an index appears in both `indices[m][i]`
// and `indices[n][j]`, the result may be invalid. This differs from the normal
// DynamicStitch operator, which defines the behavior in that case.
//
// For example:
//
// ```python
//     indices[0] = 6
//     indices[1] = [4, 1]
//     indices[2] = [[5, 2], [0, 3]]
//     data[0] = [61, 62]
//     data[1] = [[41, 42], [11, 12]]
//     data[2] = [[[51, 52], [21, 22]], [[1, 2], [31, 32]]]
//     merged = [[1, 2], [11, 12], [21, 22], [31, 32], [41, 42],
//               [51, 52], [61, 62]]
// ```
//
// This method can be used to merge partitions created by `dynamic_partition`
// as illustrated in the following example:
//
// ```python
//     # Apply function (increments x_i) on elements for which a certain condition
//     # applies (x_i != -1 in this example).
//     x = tf.constant([0.1, -1., 5.2, 4.3, -1., 7.4])
//     condition_mask = tf.not_equal(x, tf.constant(-1.))
//     partitioned_data = tf.dynamic_partition(
//         x, tf.cast(condition_mask, tf.int32), 2)
//     partitioned_data[1] = partitioned_data[1] + 1.0
//     condition_indices = tf.dynamic_partition(
//         tf.range(tf.shape(x)[0]), tf.cast(condition_mask, tf.int32), 2)
//     x = tf.dynamic_stitch(condition_indices, partitioned_data)
//     # Here x=[1.1, -1., 6.2, 5.3, -1, 8.4], the -1. values remain
//     # unchanged.
// ```
//
// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
// <img style="width:100%" src="https://www.tensorflow.org/images/DynamicStitch.png" alt>
// </div>
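//
// A Go sketch of the scalar/vector case (assumes a scope `s`):
//
// ```
// indices := []tf.Output{op.Const(s, []int32{0, 2}), op.Const(s, []int32{1})}
// data := []tf.Output{op.Const(s, []int64{10, 30}), op.Const(s, []int64{20})}
// merged := op.ParallelDynamicStitch(s, indices, data)
// _ = merged // evaluates to [10, 20, 30]
// ```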
func ParallelDynamicStitch(scope *Scope, indices []tf.Output, data []tf.Output) (merged tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "ParallelDynamicStitch",
		Input: []tf.Input{
			tf.OutputList(indices), tf.OutputList(data),
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Partitions `data` into `num_partitions` tensors using indices from `partitions`.
//
// For each index tuple `js` of size `partitions.ndim`, the slice `data[js, ...]`
// becomes part of `outputs[partitions[js]]`.  The slices with `partitions[js] = i`
// are placed in `outputs[i]` in lexicographic order of `js`, and the first
// dimension of `outputs[i]` is the number of entries in `partitions` equal to `i`.
// In detail,
//
// ```python
//     outputs[i].shape = [sum(partitions == i)] + data.shape[partitions.ndim:]
//
//     outputs[i] = pack([data[js, ...] for js if partitions[js] == i])
// ```
//
// `data.shape` must start with `partitions.shape`.
//
// For example:
//
// ```python
//     # Scalar partitions.
//     partitions = 1
//     num_partitions = 2
//     data = [10, 20]
//     outputs[0] = []  # Empty with shape [0, 2]
//     outputs[1] = [[10, 20]]
//
//     # Vector partitions.
//     partitions = [0, 0, 1, 1, 0]
//     num_partitions = 2
//     data = [10, 20, 30, 40, 50]
//     outputs[0] = [10, 20, 50]
//     outputs[1] = [30, 40]
// ```
//
// See `dynamic_stitch` for an example on how to merge partitions back.
//
// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
// <img style="width:100%" src="https://www.tensorflow.org/images/DynamicPartition.png" alt>
// </div>
//
// Arguments:
//	data: The tensor to be partitioned.
//	partitions: Any shape.  Indices in the range `[0, num_partitions)`.
//	num_partitions: The number of partitions to output.
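//
// A sketch of the vector-partitions example above (assumes a scope `s`):
//
// ```
// data := op.Const(s, []int64{10, 20, 30, 40, 50})
// partitions := op.Const(s, []int32{0, 0, 1, 1, 0})
// outputs := op.DynamicPartition(s, data, partitions, 2)
// // outputs[0] evaluates to [10, 20, 50]; outputs[1] to [30, 40].
// ```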
func DynamicPartition(scope *Scope, data tf.Output, partitions tf.Output, num_partitions int64) (outputs []tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_partitions": num_partitions}
	opspec := tf.OpSpec{
		Type: "DynamicPartition",
		Input: []tf.Input{
			data, partitions,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	if scope.Err() != nil {
		return
	}
	var idx int
	var err error
	if outputs, idx, err = makeOutputList(op, idx, "outputs"); err != nil {
		scope.UpdateErr("DynamicPartition", err)
		return
	}
	return outputs
}

// Produces a string handle for the given MultiDeviceIterator.
//
// Arguments:
//	multi_device_iterator: A MultiDeviceIterator resource.
//
// Returns A string representing the resource.
func MultiDeviceIteratorToStringHandle(scope *Scope, multi_device_iterator tf.Output) (string_handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "MultiDeviceIteratorToStringHandle",
		Input: []tf.Input{
			multi_device_iterator,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Checks whether a tree has been initialized.
//
// Arguments:
//	tree_handle: Handle to the tree.
//
// Returns Whether the tree is initialized.
func TensorForestTreeIsInitializedOp(scope *Scope, tree_handle tf.Output) (is_initialized tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "TensorForestTreeIsInitializedOp",
		Input: []tf.Input{
			tree_handle,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Gets next element for the provided shard number.
//
// Arguments:
//	multi_device_iterator: A MultiDeviceIterator resource.
//	shard_num: Integer representing which shard to fetch data for.
//	incarnation_id: Which incarnation of the MultiDeviceIterator is running.
//	output_types: The type list for the return values.
//	output_shapes: The list of shapes being produced.
//
// Returns Result of the get_next on the dataset.
func MultiDeviceIteratorGetNextFromShard(scope *Scope, multi_device_iterator tf.Output, shard_num tf.Output, incarnation_id tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (components []tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
	opspec := tf.OpSpec{
		Type: "MultiDeviceIteratorGetNextFromShard",
		Input: []tf.Input{
			multi_device_iterator, shard_num, incarnation_id,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	if scope.Err() != nil {
		return
	}
	var idx int
	var err error
	if components, idx, err = makeOutputList(op, idx, "components"); err != nil {
		scope.UpdateErr("MultiDeviceIteratorGetNextFromShard", err)
		return
	}
	return components
}

// Initializes the multi device iterator with the given dataset.
//
// Arguments:
//	dataset: Dataset to be iterated upon.
//	multi_device_iterator: A MultiDeviceIteratorResource.
//	max_buffer_size: The maximum size of the host side per device buffer to keep.
//
// Returns An int64 indicating which incarnation of the MultiDeviceIterator
// is running.
func MultiDeviceIteratorInit(scope *Scope, dataset tf.Output, multi_device_iterator tf.Output, max_buffer_size tf.Output) (incarnation_id tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "MultiDeviceIteratorInit",
		Input: []tf.Input{
			dataset, multi_device_iterator, max_buffer_size,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Copy a tensor, setting everything outside a central band in each innermost
// matrix to zero.
//
// The `band` part is computed as follows:
// Assume `input` has `k` dimensions `[I, J, K, ..., M, N]`, then the output is a
// tensor with the same shape where
//
// `band[i, j, k, ..., m, n] = in_band(m, n) * input[i, j, k, ..., m, n]`.
//
// The indicator function is
//
// `in_band(m, n) = (num_lower < 0 || (m-n) <= num_lower) &&
//                  (num_upper < 0 || (n-m) <= num_upper)`.
//
// For example:
//
// ```
// # if 'input' is [[ 0,  1,  2, 3]
//                  [-1,  0,  1, 2]
//                  [-2, -1,  0, 1]
//                  [-3, -2, -1, 0]],
//
// tf.matrix_band_part(input, 1, -1) ==> [[ 0,  1,  2, 3]
//                                        [-1,  0,  1, 2]
//                                        [ 0, -1,  0, 1]
//                                        [ 0,  0, -1, 0]],
//
// tf.matrix_band_part(input, 2, 1) ==> [[ 0,  1,  0, 0]
//                                       [-1,  0,  1, 0]
//                                       [-2, -1,  0, 1]
//                                       [ 0, -2, -1, 0]]
// ```
//
// Useful special cases:
//
// ```
//  tf.matrix_band_part(input, 0, -1) ==> Upper triangular part.
//  tf.matrix_band_part(input, -1, 0) ==> Lower triangular part.
//  tf.matrix_band_part(input, 0, 0) ==> Diagonal.
// ```
//
// Arguments:
//	input: Rank `k` tensor.
//	num_lower: 0-D tensor. Number of subdiagonals to keep. If negative, keep entire
// lower triangle.
//	num_upper: 0-D tensor. Number of superdiagonals to keep. If negative, keep
// entire upper triangle.
//
// Returns Rank `k` tensor of the same shape as input. The extracted banded tensor.
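//
// A sketch of the diagonal special case (assumes a scope `s`):
//
// ```
// input := op.Const(s, [][]float32{{0, 1}, {-1, 0}})
// zero := op.Const(s, int64(0))
// band := op.MatrixBandPart(s, input, zero, zero) // keeps only the diagonal
// _ = band
// ```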
func MatrixBandPart(scope *Scope, input tf.Output, num_lower tf.Output, num_upper tf.Output) (band tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "MatrixBandPart",
		Input: []tf.Input{
			input, num_lower, num_upper,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Gets the next output from the given iterator as an Optional variant.
func IteratorGetNextAsOptional(scope *Scope, iterator tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (optional tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
	opspec := tf.OpSpec{
		Type: "IteratorGetNextAsOptional",
		Input: []tf.Input{
			iterator,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Returns the value stored in an Optional variant or raises an error if none exists.
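//
// A sketch chaining the Optional ops (assumes a scope `s` and `opt`, an
// Optional variant from IteratorGetNextAsOptional or OptionalFromValue
// wrapping a single float scalar):
//
// ```
// has := op.OptionalHasValue(s, opt)
// vals := op.OptionalGetValue(s, opt,
//	[]tf.DataType{tf.Float}, []tf.Shape{tf.ScalarShape()})
// _, _ = has, vals
// ```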
func OptionalGetValue(scope *Scope, optional tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (components []tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
	opspec := tf.OpSpec{
		Type: "OptionalGetValue",
		Input: []tf.Input{
			optional,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	if scope.Err() != nil {
		return
	}
	var idx int
	var err error
	if components, idx, err = makeOutputList(op, idx, "components"); err != nil {
		scope.UpdateErr("OptionalGetValue", err)
		return
	}
	return components
}

// Returns true if and only if the given Optional variant has a value.
func OptionalHasValue(scope *Scope, optional tf.Output) (has_value tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "OptionalHasValue",
		Input: []tf.Input{
			optional,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Deserializes a proto into the tree handle.
//
// Arguments:
//	tree_handle: Handle to the tree resource to be restored.
//	tree_config: Serialized proto string of the boosted_trees.Tree proto.
//
// Returns the created operation.
func TensorForestTreeDeserialize(scope *Scope, tree_handle tf.Output, tree_config tf.Output) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "TensorForestTreeDeserialize",
		Input: []tf.Input{
			tree_handle, tree_config,
		},
	}
	return scope.AddOperation(opspec)
}

// Constructs an Optional variant from a tuple of tensors.
func OptionalFromValue(scope *Scope, components []tf.Output) (optional tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "OptionalFromValue",
		Input: []tf.Input{
			tf.OutputList(components),
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Creates a dataset by applying optimizations to `input_dataset`.
//
// Arguments:
//	input_dataset: A variant tensor representing the input dataset.
//	optimizations: A `tf.string` vector `tf.Tensor` identifying optimizations to use.
//	output_types: The type list for the dataset's output components.
//	output_shapes: The list of shapes for the dataset's output components.
func OptimizeDataset(scope *Scope, input_dataset tf.Output, optimizations tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
	opspec := tf.OpSpec{
		Type: "OptimizeDataset",
		Input: []tf.Input{
			input_dataset, optimizations,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Converts the given variant tensor to an iterator and stores it in the given resource.
//
// Arguments:
//	resource_handle: A handle to an iterator resource.
//	serialized: A variant tensor storing the state of the iterator contained in the
// resource.
//
// Returns the created operation.
func DeserializeIterator(scope *Scope, resource_handle tf.Output, serialized tf.Output) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "DeserializeIterator",
		Input: []tf.Input{
			resource_handle, serialized,
		},
	}
	return scope.AddOperation(opspec)
}

// Gather slices from `params` axis `axis` according to `indices`.
//
// `indices` must be an integer tensor of any dimension (usually 0-D or 1-D).
// Produces an output tensor with shape `params.shape[:axis] + indices.shape +
// params.shape[axis + 1:]` where:
//
// ```python
//     # Scalar indices (output is rank(params) - 1).
//     output[a_0, ..., a_n, b_0, ..., b_n] =
//       params[a_0, ..., a_n, indices, b_0, ..., b_n]
//
//     # Vector indices (output is rank(params)).
//     output[a_0, ..., a_n, i, b_0, ..., b_n] =
//       params[a_0, ..., a_n, indices[i], b_0, ..., b_n]
//
//     # Higher rank indices (output is rank(params) + rank(indices) - 1).
//     output[a_0, ..., a_n, i, ..., j, b_0, ... b_n] =
//       params[a_0, ..., a_n, indices[i, ..., j], b_0, ..., b_n]
// ```
//
// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
// <img style="width:100%" src="https://www.tensorflow.org/images/Gather.png" alt>
// </div>
//
// Note that on CPU, if an out-of-bound index is found, an error is returned.
// On GPU, if an out-of-bound index is found, a 0 is stored in the
// corresponding output value.
//
// See also `tf.batch_gather` and `tf.gather_nd`.
//
// Arguments:
//	params: The tensor from which to gather values. Must be at least rank
// `axis + 1`.
//	indices: Index tensor. Must be in range `[0, params.shape[axis])`.
//	axis: The axis in `params` to gather `indices` from. Defaults to the first
// dimension. Supports negative indexes.
//
// Returns Values from `params` gathered from indices given by `indices`, with
// shape `params.shape[:axis] + indices.shape + params.shape[axis + 1:]`.
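//
// A vector-indices sketch (assumes a scope `s`):
//
// ```
// params := op.Const(s, []string{"a", "b", "c", "d"})
// indices := op.Const(s, []int32{3, 0})
// axis := op.Const(s, int32(0))
// out := op.GatherV2(s, params, indices, axis) // evaluates to ["d", "a"]
// _ = out
// ```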
func GatherV2(scope *Scope, params tf.Output, indices tf.Output, axis tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "GatherV2",
		Input: []tf.Input{
			params, indices, axis,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Converts the given `resource_handle` representing an iterator to a variant tensor.
//
// Arguments:
//	resource_handle: A handle to an iterator resource.
//
// Returns A variant tensor storing the state of the iterator contained in the
// resource.
func SerializeIterator(scope *Scope, resource_handle tf.Output) (serialized tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "SerializeIterator",
		Input: []tf.Input{
			resource_handle,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Outputs a tensor containing the reduction across all input tensors.
//
// Outputs a tensor containing the reduction across all input tensors passed to ops
// within the same `shared_name`.
//
// The graph should be constructed so that if one op runs with shared_name value `c`,
// then `num_devices` ops will run with shared_name value `c`.  Failure to do so
// will cause the graph execution to fail to complete.
//
// input: the input to the reduction.
// data: the value of the reduction across all `num_devices` devices.
// reduction: the reduction operation to perform.
// num_devices: The number of devices participating in this reduction.
// shared_name: Identifier that is shared between ops of the same reduction.
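//
// A construction sketch (assumes a scope `s` and a float tensor `input`;
// `reduction` is one of "min", "max", "prod", or "sum", and a matching op
// must be built for each of the `num_devices` participating devices):
//
// ```
// data := op.NcclAllReduce(s, input, "sum", 2, "shared_reduce_0")
// _ = data
// ```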
7771func NcclAllReduce(scope *Scope, input tf.Output, reduction string, num_devices int64, shared_name string) (data tf.Output) {
7772	if scope.Err() != nil {
7773		return
7774	}
7775	attrs := map[string]interface{}{"reduction": reduction, "num_devices": num_devices, "shared_name": shared_name}
7776	opspec := tf.OpSpec{
7777		Type: "NcclAllReduce",
7778		Input: []tf.Input{
7779			input,
7780		},
7781		Attrs: attrs,
7782	}
7783	op := scope.AddOperation(opspec)
7784	return op.Output(0)
7785}

// RegexReplaceAttr is an optional argument to RegexReplace.
type RegexReplaceAttr func(optionalAttr)

// RegexReplaceReplaceGlobal sets the optional replace_global attribute to value.
//
// value: If True, the replacement is global (that is, all matches of the `pattern` regular
// expression in each input string are rewritten), otherwise the `rewrite`
// substitution is only made for the first `pattern` match.
// If not specified, defaults to true
func RegexReplaceReplaceGlobal(value bool) RegexReplaceAttr {
	return func(m optionalAttr) {
		m["replace_global"] = value
	}
}

// Replaces matches of the `pattern` regular expression in `input` with the
// replacement string provided in `rewrite`.
//
// It follows the re2 syntax (https://github.com/google/re2/wiki/Syntax)
//
// Arguments:
//	input: The text to be processed.
//	pattern: The regular expression to be matched in the `input` strings.
//	rewrite: The rewrite string to be substituted for the `pattern` expression where it is
// matched in the `input` strings.
//
// Returns The text after applying pattern match and rewrite substitution.
func RegexReplace(scope *Scope, input tf.Output, pattern tf.Output, rewrite tf.Output, optional ...RegexReplaceAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "RegexReplace",
		Input: []tf.Input{
			input, pattern, rewrite,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
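
// A minimal sketch (not part of the generated API) of building a RegexReplace
// node, here disabling global replacement through the optional attribute;
// exampleRegexReplace is an illustrative name.
func exampleRegexReplace() tf.Output {
	s := NewScope()
	input := Const(s, []string{"order 66, aisle 7"})
	pattern := Const(s, "[0-9]+")
	rewrite := Const(s, "#")
	// With ReplaceGlobal(false) only the first match per string is rewritten:
	// "order #, aisle 7".
	return RegexReplace(s, input, pattern, rewrite, RegexReplaceReplaceGlobal(false))
}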

// Quantized batch normalization.
//
// This op is deprecated and will be removed in the future. Prefer
// `tf.nn.batch_normalization`.
//
// Arguments:
//	t: A 4D input Tensor.
//	t_min: The value represented by the lowest quantized input.
//	t_max: The value represented by the highest quantized input.
//	m: A 1D mean Tensor with size matching the last dimension of t.
// This is the first output from tf.nn.moments,
// or a saved moving average thereof.
//	m_min: The value represented by the lowest quantized mean.
//	m_max: The value represented by the highest quantized mean.
//	v: A 1D variance Tensor with size matching the last dimension of t.
// This is the second output from tf.nn.moments,
// or a saved moving average thereof.
//	v_min: The value represented by the lowest quantized variance.
//	v_max: The value represented by the highest quantized variance.
//	beta: A 1D beta Tensor with size matching the last dimension of t.
// An offset to be added to the normalized tensor.
//	beta_min: The value represented by the lowest quantized offset.
//	beta_max: The value represented by the highest quantized offset.
//	gamma: A 1D gamma Tensor with size matching the last dimension of t.
// If "scale_after_normalization" is true, this tensor will be multiplied
// with the normalized tensor.
//	gamma_min: The value represented by the lowest quantized gamma.
//	gamma_max: The value represented by the highest quantized gamma.
//
//	variance_epsilon: A small float number to avoid dividing by 0.
//	scale_after_normalization: A bool indicating whether the resulting tensor
// needs to be multiplied with gamma.
func QuantizedBatchNormWithGlobalNormalization(scope *Scope, t tf.Output, t_min tf.Output, t_max tf.Output, m tf.Output, m_min tf.Output, m_max tf.Output, v tf.Output, v_min tf.Output, v_max tf.Output, beta tf.Output, beta_min tf.Output, beta_max tf.Output, gamma tf.Output, gamma_min tf.Output, gamma_max tf.Output, out_type tf.DataType, variance_epsilon float32, scale_after_normalization bool) (result tf.Output, result_min tf.Output, result_max tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"out_type": out_type, "variance_epsilon": variance_epsilon, "scale_after_normalization": scale_after_normalization}
	opspec := tf.OpSpec{
		Type: "QuantizedBatchNormWithGlobalNormalization",
		Input: []tf.Input{
			t, t_min, t_max, m, m_min, m_max, v, v_min, v_max, beta, beta_min, beta_max, gamma, gamma_min, gamma_max,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}

// Adds Tensor 'bias' to Tensor 'input' for Quantized types.
//
// Broadcasts the values of bias on dimensions 0..N-2 of 'input'.
//
// Arguments:
//
//	bias: A 1D bias Tensor with size matching the last dimension of 'input'.
//	min_input: The float value that the lowest quantized input value represents.
//	max_input: The float value that the highest quantized input value represents.
//	min_bias: The float value that the lowest quantized bias value represents.
//	max_bias: The float value that the highest quantized bias value represents.
//
//
// Returns The float value that the lowest quantized output value represents. The float value that the highest quantized output value represents.
func QuantizedBiasAdd(scope *Scope, input tf.Output, bias tf.Output, min_input tf.Output, max_input tf.Output, min_bias tf.Output, max_bias tf.Output, out_type tf.DataType) (output tf.Output, min_out tf.Output, max_out tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"out_type": out_type}
	opspec := tf.OpSpec{
		Type: "QuantizedBiasAdd",
		Input: []tf.Input{
			input, bias, min_input, max_input, min_bias, max_bias,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}

// Produces the average pool of the input tensor for quantized types.
//
// Arguments:
//	input: 4-D with shape `[batch, height, width, channels]`.
//	min_input: The float value that the lowest quantized input value represents.
//	max_input: The float value that the highest quantized input value represents.
//	ksize: The size of the window for each dimension of the input tensor.
// The length must be 4 to match the number of dimensions of the input.
//	strides: The stride of the sliding window for each dimension of the input
// tensor.  The length must be 4 to match the number of dimensions of the input.
//	padding: The type of padding algorithm to use.
//
// Returns The float value that the lowest quantized output value represents. The float value that the highest quantized output value represents.
func QuantizedAvgPool(scope *Scope, input tf.Output, min_input tf.Output, max_input tf.Output, ksize []int64, strides []int64, padding string) (output tf.Output, min_output tf.Output, max_output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
	opspec := tf.OpSpec{
		Type: "QuantizedAvgPool",
		Input: []tf.Input{
			input, min_input, max_input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}

// Extract `patches` from `input` and put them in the "depth" output dimension. 3D extension of `extract_image_patches`.
//
// Arguments:
//	input: 5-D Tensor with shape `[batch, in_planes, in_rows, in_cols, depth]`.
//	ksizes: The size of the sliding window for each dimension of `input`.
//	strides: 1-D of length 5. How far the centers of two consecutive patches are in
// `input`. Must be: `[1, stride_planes, stride_rows, stride_cols, 1]`.
//	padding: The type of padding algorithm to use.
//
// We specify the size-related attributes as:
//
// ```python
//       ksizes = [1, ksize_planes, ksize_rows, ksize_cols, 1]
//       strides = [1, stride_planes, stride_rows, stride_cols, 1]
// ```
//
// Returns 5-D Tensor with shape `[batch, out_planes, out_rows, out_cols,
// ksize_planes * ksize_rows * ksize_cols * depth]` containing patches
// with size `ksize_planes x ksize_rows x ksize_cols x depth` vectorized
// in the "depth" dimension. Note `out_planes`, `out_rows` and `out_cols`
// are the dimensions of the output patches.
func ExtractVolumePatches(scope *Scope, input tf.Output, ksizes []int64, strides []int64, padding string) (patches tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"ksizes": ksizes, "strides": strides, "padding": padding}
	opspec := tf.OpSpec{
		Type: "ExtractVolumePatches",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// FractionalAvgPoolAttr is an optional argument to FractionalAvgPool.
type FractionalAvgPoolAttr func(optionalAttr)

// FractionalAvgPoolPseudoRandom sets the optional pseudo_random attribute to value.
//
// value: When set to True, generates the pooling sequence in a
// pseudorandom fashion, otherwise, in a random fashion. Check the paper [Benjamin
// Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071) for the
// difference between pseudorandom and random.
// If not specified, defaults to false
func FractionalAvgPoolPseudoRandom(value bool) FractionalAvgPoolAttr {
	return func(m optionalAttr) {
		m["pseudo_random"] = value
	}
}

// FractionalAvgPoolOverlapping sets the optional overlapping attribute to value.
//
// value: When set to True, it means when pooling, the values at the boundary
// of adjacent pooling cells are used by both cells. For example:
//
// `index  0  1  2  3  4`
//
// `value  20 5  16 3  7`
//
// If the pooling sequence is [0, 2, 4], then 16, at index 2, will be used twice.
// The result would be [41/3, 26/3] for fractional avg pooling.
// If not specified, defaults to false
func FractionalAvgPoolOverlapping(value bool) FractionalAvgPoolAttr {
	return func(m optionalAttr) {
		m["overlapping"] = value
	}
}
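
// A plain-Go illustration (hypothetical helper, not part of the generated API)
// of the overlapping example above: with pooling sequence [0, 2, 4], the
// boundary value 16 at index 2 contributes to both cells, giving [41/3, 26/3].
func exampleOverlappingAvg() []float32 {
	value := []float32{20, 5, 16, 3, 7}
	seq := []int{0, 2, 4}
	out := make([]float32, 0, len(seq)-1)
	for c := 0; c+1 < len(seq); c++ {
		lo, hi := seq[c], seq[c+1] // inclusive bounds: the boundary index hi is shared
		var sum float32
		for i := lo; i <= hi; i++ {
			sum += value[i]
		}
		out = append(out, sum/float32(hi-lo+1))
	}
	return out // [41/3, 26/3], i.e. approximately [13.667, 8.667]
}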

// FractionalAvgPoolDeterministic sets the optional deterministic attribute to value.
//
// value: When set to True, a fixed pooling region will be used when
// iterating over a FractionalAvgPool node in the computation graph. Mainly used
// in unit tests to make FractionalAvgPool deterministic.
// If not specified, defaults to false
func FractionalAvgPoolDeterministic(value bool) FractionalAvgPoolAttr {
	return func(m optionalAttr) {
		m["deterministic"] = value
	}
}

// FractionalAvgPoolSeed sets the optional seed attribute to value.
//
// value: If either seed or seed2 are set to be non-zero, the random number
// generator is seeded by the given seed.  Otherwise, it is seeded by a
// random seed.
// If not specified, defaults to 0
func FractionalAvgPoolSeed(value int64) FractionalAvgPoolAttr {
	return func(m optionalAttr) {
		m["seed"] = value
	}
}

// FractionalAvgPoolSeed2 sets the optional seed2 attribute to value.
//
// value: A second seed to avoid seed collision.
// If not specified, defaults to 0
func FractionalAvgPoolSeed2(value int64) FractionalAvgPoolAttr {
	return func(m optionalAttr) {
		m["seed2"] = value
	}
}

// Performs fractional average pooling on the input.
//
// Fractional average pooling is similar to fractional max pooling in the pooling
// region generation step. The only difference is that after pooling regions are
// generated, a mean operation is performed instead of a max operation in each
// pooling region.
//
// Arguments:
//	value: 4-D with shape `[batch, height, width, channels]`.
//	pooling_ratio: Pooling ratio for each dimension of `value`, currently only
// supports row and col dimension and should be >= 1.0. For example, a valid
// pooling ratio looks like [1.0, 1.44, 1.73, 1.0]. The first and last elements
// must be 1.0 because we don't allow pooling on batch and channels
// dimensions. 1.44 and 1.73 are pooling ratio on height and width dimensions
// respectively.
//
// Returns the output tensor after fractional avg pooling, the row pooling sequence (needed to calculate the gradient), and the column pooling sequence (needed to calculate the gradient).
func FractionalAvgPool(scope *Scope, value tf.Output, pooling_ratio []float32, optional ...FractionalAvgPoolAttr) (output tf.Output, row_pooling_sequence tf.Output, col_pooling_sequence tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"pooling_ratio": pooling_ratio}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "FractionalAvgPool",
		Input: []tf.Input{
			value,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}

// RandomCropAttr is an optional argument to RandomCrop.
type RandomCropAttr func(optionalAttr)

// RandomCropSeed sets the optional seed attribute to value.
//
// value: If either seed or seed2 are set to be non-zero, the random number
// generator is seeded by the given seed.  Otherwise, it is seeded by a
// random seed.
// If not specified, defaults to 0
func RandomCropSeed(value int64) RandomCropAttr {
	return func(m optionalAttr) {
		m["seed"] = value
	}
}

// RandomCropSeed2 sets the optional seed2 attribute to value.
//
// value: A second seed to avoid seed collision.
// If not specified, defaults to 0
func RandomCropSeed2(value int64) RandomCropAttr {
	return func(m optionalAttr) {
		m["seed2"] = value
	}
}

// Randomly crop `image`.
//
// DEPRECATED at GraphDef version 8: Random crop is now pure Python
//
// `size` is a 1-D int64 tensor with 2 elements representing the crop height and
// width.  The values must be non-negative.
//
// This Op picks a random location in `image` and crops a `height` by `width`
// rectangle from that location.  The random location is picked so the cropped
// area will fit inside the original image.
//
// Arguments:
//	image: 3-D of shape `[height, width, channels]`.
//	size: 1-D of length 2 containing: `crop_height`, `crop_width`.
//
// Returns 3-D of shape `[crop_height, crop_width, channels]`.
func RandomCrop(scope *Scope, image tf.Output, size tf.Output, optional ...RandomCropAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "RandomCrop",
		Input: []tf.Input{
			image, size,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// TopKV2Attr is an optional argument to TopKV2.
type TopKV2Attr func(optionalAttr)

// TopKV2Sorted sets the optional sorted attribute to value.
//
// value: If true, the resulting `k` elements will be sorted by the values in
// descending order.
// If not specified, defaults to true
func TopKV2Sorted(value bool) TopKV2Attr {
	return func(m optionalAttr) {
		m["sorted"] = value
	}
}

// Finds values and indices of the `k` largest elements for the last dimension.
//
// If the input is a vector (rank-1), finds the `k` largest entries in the vector
// and outputs their values and indices as vectors.  Thus `values[j]` is the
// `j`-th largest entry in `input`, and its index is `indices[j]`.
//
// For matrices (resp. higher rank input), computes the top `k` entries in each
// row (resp. vector along the last dimension).  Thus,
//
//     values.shape = indices.shape = input.shape[:-1] + [k]
//
// If two elements are equal, the lower-index element appears first.
//
// Arguments:
//	input: 1-D or higher with last dimension at least `k`.
//	k: 0-D.  Number of top elements to look for along the last dimension (along each
// row for matrices).
//
// Returns The `k` largest elements along each last dimensional slice. The indices of `values` within the last dimension of `input`.
func TopKV2(scope *Scope, input tf.Output, k tf.Output, optional ...TopKV2Attr) (values tf.Output, indices tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "TopKV2",
		Input: []tf.Input{
			input, k,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1)
}
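
// A short sketch (illustrative, not part of the generated API) of TopKV2:
// the k largest values of a vector plus their indices. exampleTopK is a
// hypothetical name.
func exampleTopK() (values, indices tf.Output) {
	s := NewScope()
	input := Const(s, []float32{1, 9, 3, 7})
	k := Const(s, int32(2))
	// Sorted descending by default: values [9, 7], indices [1, 3].
	return TopKV2(s, input, k)
}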

// Returns x // y element-wise.
//
// *NOTE*: `FloorDiv` supports broadcasting. More about broadcasting
// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
func FloorDiv(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "FloorDiv",
		Input: []tf.Input{
			x, y,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
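
// Floor division rounds toward negative infinity, unlike Go's `/`, which
// truncates toward zero. This hypothetical helper restates the semantics
// for int64 scalars:
func floorDivExample(x, y int64) int64 {
	q := x / y
	// Adjust when the signs differ and the division was inexact,
	// e.g. floorDivExample(-7, 2) == -4 while -7/2 == -3 in Go.
	if (x%y != 0) && ((x < 0) != (y < 0)) {
		q--
	}
	return q
}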

// Computes the inverse permutation of a tensor.
//
// This operation computes the inverse of an index permutation. It takes a 1-D
// integer tensor `x`, which represents the indices of a zero-based array, and
// swaps each value with its index position. In other words, for an output tensor
// `y` and an input tensor `x`, this operation computes the following:
//
// `y[x[i]] = i for i in [0, 1, ..., len(x) - 1]`
//
// The values must include 0. There can be no duplicate values or negative values.
//
// For example:
//
// ```
// # tensor `x` is [3, 4, 0, 2, 1]
// invert_permutation(x) ==> [2, 4, 3, 0, 1]
// ```
//
// Arguments:
//	x: 1-D.
//
// Returns 1-D.
func InvertPermutation(scope *Scope, x tf.Output) (y tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "InvertPermutation",
		Input: []tf.Input{
			x,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
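
// The inversion rule `y[x[i]] = i` is easy to state directly in Go; this
// hypothetical helper mirrors the op's behavior for a valid permutation.
func invertPermutationExample(x []int) []int {
	y := make([]int, len(x))
	for i, v := range x {
		y[v] = i // assumes x is a permutation of 0..len(x)-1
	}
	return y // invertPermutationExample([]int{3, 4, 0, 2, 1}) == [2, 4, 3, 0, 1]
}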

// Computes log softmax activations.
//
// For each batch `i` and class `j` we have
//
//     logsoftmax[i, j] = logits[i, j] - log(sum(exp(logits[i])))
//
// Arguments:
//	logits: 2-D with shape `[batch_size, num_classes]`.
//
// Returns Same shape as `logits`.
func LogSoftmax(scope *Scope, logits tf.Output) (logsoftmax tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "LogSoftmax",
		Input: []tf.Input{
			logits,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Computes softmax activations.
//
// For each batch `i` and class `j` we have
//
//     $$softmax[i, j] = exp(logits[i, j]) / sum_j(exp(logits[i, j]))$$
//
// Arguments:
//	logits: 2-D with shape `[batch_size, num_classes]`.
//
// Returns Same shape as `logits`.
func Softmax(scope *Scope, logits tf.Output) (softmax tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Softmax",
		Input: []tf.Input{
			logits,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// DecodeBmpAttr is an optional argument to DecodeBmp.
type DecodeBmpAttr func(optionalAttr)

// DecodeBmpChannels sets the optional channels attribute to value.
// If not specified, defaults to 0
func DecodeBmpChannels(value int64) DecodeBmpAttr {
	return func(m optionalAttr) {
		m["channels"] = value
	}
}

// Decode the first frame of a BMP-encoded image to a uint8 tensor.
//
// The attr `channels` indicates the desired number of color channels for the
// decoded image.
//
// Accepted values are:
//
// *   0: Use the number of channels in the BMP-encoded image.
// *   3: Output an RGB image.
// *   4: Output an RGBA image.
//
// Arguments:
//	contents: 0-D.  The BMP-encoded image.
//
// Returns 3-D with shape `[height, width, channels]`. RGB order
func DecodeBmp(scope *Scope, contents tf.Output, optional ...DecodeBmpAttr) (image tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "DecodeBmp",
		Input: []tf.Input{
			contents,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Computes exponential linear: `exp(features) - 1` if `features < 0`, `features` otherwise.
//
// See [Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs)](http://arxiv.org/abs/1511.07289)
func Elu(scope *Scope, features tf.Output) (activations tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Elu",
		Input: []tf.Input{
			features,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Computes square of x element-wise.
//
// I.e., \\(y = x * x = x^2\\).
func Square(scope *Scope, x tf.Output) (y tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Square",
		Input: []tf.Input{
			x,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// LeakyReluGradAttr is an optional argument to LeakyReluGrad.
type LeakyReluGradAttr func(optionalAttr)

// LeakyReluGradAlpha sets the optional alpha attribute to value.
// If not specified, defaults to 0.2
func LeakyReluGradAlpha(value float32) LeakyReluGradAttr {
	return func(m optionalAttr) {
		m["alpha"] = value
	}
}

// Computes rectified linear gradients for a LeakyRelu operation.
//
// Arguments:
//	gradients: The backpropagated gradients to the corresponding LeakyRelu operation.
//	features: The features passed as input to the corresponding LeakyRelu operation,
// OR the outputs of that operation (both work equivalently).
//
// Returns `gradients * (features > 0) + alpha * gradients * (features <= 0)`.
func LeakyReluGrad(scope *Scope, gradients tf.Output, features tf.Output, optional ...LeakyReluGradAttr) (backprops tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "LeakyReluGrad",
		Input: []tf.Input{
			gradients, features,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
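
// The gradient formula above is just a masked blend; this hypothetical helper
// restates it in plain Go for a single element.
func leakyReluGradExample(gradient, feature, alpha float32) float32 {
	if feature > 0 {
		return gradient
	}
	return alpha * gradient
}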

// Computes rectified linear 6: `min(max(features, 0), 6)`.
func Relu6(scope *Scope, features tf.Output) (activations tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Relu6",
		Input: []tf.Input{
			features,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// SdcaOptimizerV2Attr is an optional argument to SdcaOptimizerV2.
type SdcaOptimizerV2Attr func(optionalAttr)

// SdcaOptimizerV2Adaptive sets the optional adaptive attribute to value.
//
// value: Whether to use Adaptive SDCA for the inner loop.
// If not specified, defaults to true
func SdcaOptimizerV2Adaptive(value bool) SdcaOptimizerV2Attr {
	return func(m optionalAttr) {
		m["adaptive"] = value
	}
}

// Distributed version of Stochastic Dual Coordinate Ascent (SDCA) optimizer for
//
// linear models with L1 + L2 regularization. As the global optimization objective
// is strongly convex, the optimizer optimizes the dual objective at each step. The
// optimizer applies each update one example at a time. Examples are sampled
// uniformly, and the optimizer is learning-rate free and enjoys a linear
// convergence rate.
//
// [Proximal Stochastic Dual Coordinate Ascent](http://arxiv.org/pdf/1211.2717v1.pdf).<br>
// Shai Shalev-Shwartz, Tong Zhang. 2012
//
// $$Loss Objective = \sum f_{i} (wx_{i}) + (l2 / 2) * |w|^2 + l1 * |w|$$
//
// [Adding vs. Averaging in Distributed Primal-Dual Optimization](http://arxiv.org/abs/1502.03508).<br>
// Chenxin Ma, Virginia Smith, Martin Jaggi, Michael I. Jordan,
// Peter Richtarik, Martin Takac. 2015
//
// [Stochastic Dual Coordinate Ascent with Adaptive Probabilities](https://arxiv.org/abs/1502.08053).<br>
// Dominik Csiba, Zheng Qu, Peter Richtarik. 2015
//
// Arguments:
//	sparse_example_indices: a list of vectors which contain example indices.
//	sparse_feature_indices: a list of vectors which contain feature indices.
//	sparse_feature_values: a list of vectors which contain the feature values
// associated with each feature group.
//	dense_features: a list of matrices which contain the dense feature values.
//	example_weights: a vector which contains the weight associated with each
// example.
//	example_labels: a vector which contains the label/target associated with each
// example.
//	sparse_indices: a list of vectors where each value is the indices which have
// corresponding weights in sparse_weights. This field may be omitted for the
// dense approach.
//	sparse_weights: a list of vectors where each value is the weight associated with
// a sparse feature group.
//	dense_weights: a list of vectors where the values are the weights associated
// with a dense feature group.
//	example_state_data: a list of vectors containing the example state data.
//	loss_type: Type of the primal loss. Currently SdcaSolver supports logistic,
// squared and hinge losses.
//	l1: Symmetric l1 regularization strength.
//	l2: Symmetric l2 regularization strength.
//	num_loss_partitions: Number of partitions of the global loss function.
//	num_inner_iterations: Number of iterations per mini-batch.
//
// Returns a list of vectors containing the updated example state
// data, a list of vectors where each value is the delta weights associated
// with a sparse feature group, and a list of vectors where the values are the
// delta weights associated with a dense feature group.
func SdcaOptimizerV2(scope *Scope, sparse_example_indices []tf.Output, sparse_feature_indices []tf.Output, sparse_feature_values []tf.Output, dense_features []tf.Output, example_weights tf.Output, example_labels tf.Output, sparse_indices []tf.Output, sparse_weights []tf.Output, dense_weights []tf.Output, example_state_data tf.Output, loss_type string, l1 float32, l2 float32, num_loss_partitions int64, num_inner_iterations int64, optional ...SdcaOptimizerV2Attr) (out_example_state_data tf.Output, out_delta_sparse_weights []tf.Output, out_delta_dense_weights []tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"loss_type": loss_type, "l1": l1, "l2": l2, "num_loss_partitions": num_loss_partitions, "num_inner_iterations": num_inner_iterations}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "SdcaOptimizerV2",
		Input: []tf.Input{
			tf.OutputList(sparse_example_indices), tf.OutputList(sparse_feature_indices), tf.OutputList(sparse_feature_values), tf.OutputList(dense_features), example_weights, example_labels, tf.OutputList(sparse_indices), tf.OutputList(sparse_weights), tf.OutputList(dense_weights), example_state_data,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	if scope.Err() != nil {
		return
	}
	var idx int
	var err error
	out_example_state_data = op.Output(idx)
	if out_delta_sparse_weights, idx, err = makeOutputList(op, idx, "out_delta_sparse_weights"); err != nil {
		scope.UpdateErr("SdcaOptimizerV2", err)
		return
	}
	if out_delta_dense_weights, idx, err = makeOutputList(op, idx, "out_delta_dense_weights"); err != nil {
		scope.UpdateErr("SdcaOptimizerV2", err)
		return
	}
	return out_example_state_data, out_delta_sparse_weights, out_delta_dense_weights
}

// Computes the minimum along segments of a tensor.
//
// Read
// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)
// for an explanation of segments.
//
// This operator is similar to the unsorted segment sum operator found
// [here](../../../api_docs/python/math_ops.md#UnsortedSegmentSum).
// Instead of computing the sum over segments, it computes the minimum such that:
//
// \\(output_i = \min_{j...} data[j...]\\) where min is over tuples `j...` such
// that `segment_ids[j...] == i`.
//
// If the minimum is empty for a given segment ID `i`, it outputs the largest
// possible value for the specific numeric type,
// `output[i] = numeric_limits<T>::max()`.
//
// For example:
//
// ``` python
// c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]])
// tf.unsorted_segment_min(c, tf.constant([0, 1, 0]), num_segments=2)
// # ==> [[ 1,  2, 2, 1],
// #      [5,  6, 7, 8]]
// ```
//
// If the given segment ID `i` is negative, then the corresponding value is
// dropped, and will not be included in the result.
//
// Arguments:
//
//	segment_ids: A tensor whose shape is a prefix of `data.shape`.
//
//
// Returns Has same shape as data, except for the first `segment_ids.rank`
// dimensions, which are replaced with a single dimension which has size
// `num_segments`.
func UnsortedSegmentMin(scope *Scope, data tf.Output, segment_ids tf.Output, num_segments tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "UnsortedSegmentMin",
		Input: []tf.Input{
			data, segment_ids, num_segments,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
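
// A plain-Go restatement (hypothetical helper, not part of the generated API)
// of the segment-min rule for a 1-D int32 case: empty segments get the type's
// maximum value, and negative segment IDs are dropped.
func unsortedSegmentMinExample(data []int32, segmentIDs []int, numSegments int) []int32 {
	const maxInt32 = 1<<31 - 1
	out := make([]int32, numSegments)
	for i := range out {
		out[i] = maxInt32 // numeric_limits<T>::max() for empty segments
	}
	for j, v := range data {
		id := segmentIDs[j]
		if id < 0 {
			continue // negative segment IDs are dropped
		}
		if v < out[id] {
			out[id] = v
		}
	}
	return out
}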

// Computes rectified linear gradients for a Relu operation.
//
// Arguments:
//	gradients: The backpropagated gradients to the corresponding Relu operation.
//	features: The features passed as input to the corresponding Relu operation, OR
// the outputs of that operation (both work equivalently).
//
// Returns `gradients * (features > 0)`.
func ReluGrad(scope *Scope, gradients tf.Output, features tf.Output) (backprops tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "ReluGrad",
		Input: []tf.Input{
			gradients, features,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// TensorArrayGatherV2Attr is an optional argument to TensorArrayGatherV2.
type TensorArrayGatherV2Attr func(optionalAttr)

// TensorArrayGatherV2ElementShape sets the optional element_shape attribute to value.
// If not specified, defaults to <unknown_rank:true >
func TensorArrayGatherV2ElementShape(value tf.Shape) TensorArrayGatherV2Attr {
	return func(m optionalAttr) {
		m["element_shape"] = value
	}
}

// Deprecated. Use TensorArrayGatherV3
//
// DEPRECATED at GraphDef version 26: Use TensorArrayGatherV3
func TensorArrayGatherV2(scope *Scope, handle tf.Output, indices tf.Output, flow_in tf.Output, dtype tf.DataType, optional ...TensorArrayGatherV2Attr) (value tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"dtype": dtype}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "TensorArrayGatherV2",
		Input: []tf.Input{
			handle, indices, flow_in,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Returns the truth value of (x == y) element-wise.
//
// *NOTE*: `Equal` supports broadcasting. More about broadcasting
// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
func Equal(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Equal",
		Input: []tf.Input{
			x, y,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Compute the polygamma function \\(\psi^{(n)}(x)\\).
//
// The polygamma function is defined as:
//
// \\(\psi^{(a)}(x) = \frac{d^a}{dx^a} \psi(x)\\)
//
// where \\(\psi(x)\\) is the digamma function.
// The polygamma function is defined only for non-negative integer orders \\(a\\).
func Polygamma(scope *Scope, a tf.Output, x tf.Output) (z tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Polygamma",
		Input: []tf.Input{
			a, x,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// MaxPoolGradGradV2Attr is an optional argument to MaxPoolGradGradV2.
type MaxPoolGradGradV2Attr func(optionalAttr)

// MaxPoolGradGradV2DataFormat sets the optional data_format attribute to value.
//
// value: Specify the data format of the input and output data. With the
// default format "NHWC", the data is stored in the order of:
//     [batch, in_height, in_width, in_channels].
// Alternatively, the format could be "NCHW", the data storage order of:
//     [batch, in_channels, in_height, in_width].
// If not specified, defaults to "NHWC"
func MaxPoolGradGradV2DataFormat(value string) MaxPoolGradGradV2Attr {
	return func(m optionalAttr) {
		m["data_format"] = value
	}
}

// Computes second-order gradients of the maxpooling function.
//
// Arguments:
//	orig_input: The original input tensor.
//	orig_output: The original output tensor.
//	grad: 4-D.  Gradients of gradients w.r.t. the input of `max_pool`.
//	ksize: The size of the window for each dimension of the input tensor.
//	strides: The stride of the sliding window for each dimension of the
// input tensor.
//	padding: The type of padding algorithm to use.
//
// Returns Gradients of gradients w.r.t. the input to `max_pool`.
func MaxPoolGradGradV2(scope *Scope, orig_input tf.Output, orig_output tf.Output, grad tf.Output, ksize tf.Output, strides tf.Output, padding string, optional ...MaxPoolGradGradV2Attr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"padding": padding}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "MaxPoolGradGradV2",
		Input: []tf.Input{
			orig_input, orig_output, grad, ksize, strides,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// MaxPoolGradWithArgmaxAttr is an optional argument to MaxPoolGradWithArgmax.
type MaxPoolGradWithArgmaxAttr func(optionalAttr)

// MaxPoolGradWithArgmaxIncludeBatchInIndex sets the optional include_batch_in_index attribute to value.
//
// value: Whether to include batch dimension in flattened index of `argmax`.
// If not specified, defaults to false
func MaxPoolGradWithArgmaxIncludeBatchInIndex(value bool) MaxPoolGradWithArgmaxAttr {
	return func(m optionalAttr) {
		m["include_batch_in_index"] = value
	}
}

// Computes gradients of the maxpooling function.
//
// Arguments:
//	input: The original input.
//	grad: 4-D with shape `[batch, height, width, channels]`.  Gradients w.r.t. the
// output of `max_pool`.
//	argmax: The indices of the maximum values chosen for each output of `max_pool`.
//	ksize: The size of the window for each dimension of the input tensor.
//	strides: The stride of the sliding window for each dimension of the
// input tensor.
//	padding: The type of padding algorithm to use.
//
// Returns Gradients w.r.t. the input of `max_pool`.
func MaxPoolGradWithArgmax(scope *Scope, input tf.Output, grad tf.Output, argmax tf.Output, ksize []int64, strides []int64, padding string, optional ...MaxPoolGradWithArgmaxAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "MaxPoolGradWithArgmax",
		Input: []tf.Input{
			input, grad, argmax,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// MutexV2Attr is an optional argument to MutexV2.
type MutexV2Attr func(optionalAttr)

// MutexV2Container sets the optional container attribute to value.
//
// value: If non-empty, this variable is placed in the given container.
// Otherwise, a default container is used.
// If not specified, defaults to ""
func MutexV2Container(value string) MutexV2Attr {
	return func(m optionalAttr) {
		m["container"] = value
	}
}

// MutexV2SharedName sets the optional shared_name attribute to value.
//
// value: If non-empty, this variable is named in the given bucket
// with this shared_name. Otherwise, the node name is used instead.
// If not specified, defaults to ""
func MutexV2SharedName(value string) MutexV2Attr {
	return func(m optionalAttr) {
		m["shared_name"] = value
	}
}

// Creates a Mutex resource that can be locked by `MutexLock`.
//
// Returns The mutex resource.
func MutexV2(scope *Scope, optional ...MutexV2Attr) (resource tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "MutexV2",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Connects N inputs to an N-way replicated TPU computation.
func TPUReplicatedInput(scope *Scope, inputs []tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "TPUReplicatedInput",
		Input: []tf.Input{
			tf.OutputList(inputs),
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// AvgPool3DAttr is an optional argument to AvgPool3D.
type AvgPool3DAttr func(optionalAttr)

// AvgPool3DDataFormat sets the optional data_format attribute to value.
//
// value: The data format of the input and output data. With the
// default format "NDHWC", the data is stored in the order of:
//     [batch, in_depth, in_height, in_width, in_channels].
// Alternatively, the format could be "NCDHW", the data storage order is:
//     [batch, in_channels, in_depth, in_height, in_width].
// If not specified, defaults to "NDHWC"
func AvgPool3DDataFormat(value string) AvgPool3DAttr {
	return func(m optionalAttr) {
		m["data_format"] = value
	}
}

// Performs 3D average pooling on the input.
//
// Arguments:
//	input: Shape `[batch, depth, rows, cols, channels]` tensor to pool over.
//	ksize: 1-D tensor of length 5. The size of the window for each dimension of
// the input tensor. Must have `ksize[0] = ksize[4] = 1`.
//	strides: 1-D tensor of length 5. The stride of the sliding window for each
// dimension of `input`. Must have `strides[0] = strides[4] = 1`.
//	padding: The type of padding algorithm to use.
//
// Returns The average pooled output tensor.
func AvgPool3D(scope *Scope, input tf.Output, ksize []int64, strides []int64, padding string, optional ...AvgPool3DAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "AvgPool3D",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// DepthToSpaceAttr is an optional argument to DepthToSpace.
type DepthToSpaceAttr func(optionalAttr)

// DepthToSpaceDataFormat sets the optional data_format attribute to value.
// If not specified, defaults to "NHWC"
func DepthToSpaceDataFormat(value string) DepthToSpaceAttr {
	return func(m optionalAttr) {
		m["data_format"] = value
	}
}

// DepthToSpace for tensors of type T.
//
// Rearranges data from depth into blocks of spatial data.
// This is the reverse transformation of SpaceToDepth. More specifically,
// this op outputs a copy of the input tensor where values from the `depth`
// dimension are moved in spatial blocks to the `height` and `width` dimensions.
// The attr `block_size` indicates the input block size and how the data is moved.
//
//   * Chunks of data of size `block_size * block_size` from depth are rearranged
//     into non-overlapping blocks of size `block_size x block_size`
//   * The width of the output tensor is `input_width * block_size`, whereas the
//     height is `input_height * block_size`.
//   * The Y, X coordinates within each block of the output image are determined
//     by the high order component of the input channel index.
//   * The depth of the input tensor must be divisible by
//     `block_size * block_size`.
//
// The `data_format` attr specifies the layout of the input and output tensors
// with the following options:
//   "NHWC": `[ batch, height, width, channels ]`
//   "NCHW": `[ batch, channels, height, width ]`
//   "NCHW_VECT_C":
//       `qint8 [ batch, channels / 4, height, width, 4 ]`
//
// It is useful to consider the operation as transforming a 6-D Tensor.
// e.g. for data_format = NHWC,
//      Each element in the input tensor can be specified via 6 coordinates,
//      ordered by decreasing memory layout significance as:
//      n,iY,iX,bY,bX,oC  (where n=batch index, iX, iY means X or Y coordinates
//                         within the input image, bX, bY means coordinates
//                         within the output block, oC means output channels).
//      The output would be the input transposed to the following layout:
//      n,iY,bY,iX,bX,oC
//
// This operation is useful for resizing the activations between convolutions
// (but keeping all data), e.g. instead of pooling. It is also useful for training
// purely convolutional models.
//
// For example, given an input of shape `[1, 1, 1, 4]`, data_format = "NHWC" and
// block_size = 2:
//
// ```
// x = [[[[1, 2, 3, 4]]]]
//
// ```
//
// This operation will output a tensor of shape `[1, 2, 2, 1]`:
//
// ```
//    [[[[1], [2]],
//      [[3], [4]]]]
// ```
//
// Here, the input has a batch of 1 and each batch element has shape `[1, 1, 4]`,
// the corresponding output will have 2x2 elements and will have a depth of
// 1 channel (1 = `4 / (block_size * block_size)`).
// The output element shape is `[2, 2, 1]`.
//
// For an input tensor with larger depth, here of shape `[1, 1, 1, 12]`, e.g.
//
// ```
// x = [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]]
// ```
//
// This operation, for block size of 2, will return the following tensor of shape
// `[1, 2, 2, 3]`
//
// ```
//    [[[[1, 2, 3], [4, 5, 6]],
//      [[7, 8, 9], [10, 11, 12]]]]
//
// ```
//
// Similarly, for the following input of shape `[1, 2, 2, 4]`, and a block size of 2:
//
// ```
// x =  [[[[1, 2, 3, 4],
//        [5, 6, 7, 8]],
//       [[9, 10, 11, 12],
//        [13, 14, 15, 16]]]]
// ```
//
// the operator will return the following tensor of shape `[1, 4, 4, 1]`:
//
// ```
// x = [[[ [1],   [2],  [5],  [6]],
//       [ [3],   [4],  [7],  [8]],
//       [ [9],  [10], [13],  [14]],
//       [ [11], [12], [15],  [16]]]]
//
// ```
//
// Arguments:
//
//	block_size: The size of the spatial block, same as in Space2Depth.
func DepthToSpace(scope *Scope, input tf.Output, block_size int64, optional ...DepthToSpaceAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"block_size": block_size}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "DepthToSpace",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
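
// A plain-Go sketch (hypothetical helper, not part of the generated API) of
// the NHWC rearrangement for a single batch element: the channel index splits
// into (bY, bX, oC), reproducing the [1, 1, 1, 4] -> [1, 2, 2, 1] example
// above for blockSize = 2.
func depthToSpaceExample(in []float32, h, w, c, blockSize int) []float32 {
	outC := c / (blockSize * blockSize)
	outH, outW := h*blockSize, w*blockSize
	out := make([]float32, outH*outW*outC)
	for y := 0; y < h; y++ {
		for x := 0; x < w; x++ {
			for ch := 0; ch < c; ch++ {
				bY := ch / (blockSize * outC) // high-order bits pick the row within the block
				bX := (ch / outC) % blockSize // next bits pick the column within the block
				oC := ch % outC               // low-order bits are the output channel
				oy, ox := y*blockSize+bY, x*blockSize+bX
				out[(oy*outW+ox)*outC+oC] = in[(y*w+x)*c+ch]
			}
		}
	}
	// depthToSpaceExample([]float32{1, 2, 3, 4}, 1, 1, 4, 2) yields the 2x2x1
	// layout [[1], [2]], [[3], [4]] flattened as [1, 2, 3, 4].
	return out
}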

// Conv3DBackpropInputV2Attr is an optional argument to Conv3DBackpropInputV2.
type Conv3DBackpropInputV2Attr func(optionalAttr)

// Conv3DBackpropInputV2DataFormat sets the optional data_format attribute to value.
//
// value: The data format of the input and output data. With the
// default format "NDHWC", the data is stored in the order of:
//     [batch, in_depth, in_height, in_width, in_channels].
// Alternatively, the format could be "NCDHW", the data storage order is:
//     [batch, in_channels, in_depth, in_height, in_width].
// If not specified, defaults to "NDHWC"
func Conv3DBackpropInputV2DataFormat(value string) Conv3DBackpropInputV2Attr {
	return func(m optionalAttr) {
		m["data_format"] = value
	}
}

// Conv3DBackpropInputV2Dilations sets the optional dilations attribute to value.
//
// value: 1-D tensor of length 5.  The dilation factor for each dimension of
// `input`. If set to k > 1, there will be k-1 skipped cells between each
// filter element on that dimension. The dimension order is determined by the
// value of `data_format`, see above for details. Dilations in the batch and
// depth dimensions must be 1.
// If not specified, defaults to <i:1 i:1 i:1 i:1 i:1 >
func Conv3DBackpropInputV2Dilations(value []int64) Conv3DBackpropInputV2Attr {
	return func(m optionalAttr) {
		m["dilations"] = value
	}
}

// Computes the gradients of 3-D convolution with respect to the input.
//
// Arguments:
//	input_sizes: An integer vector representing the tensor shape of `input`,
// where `input` is a 5-D
// `[batch, depth, rows, cols, in_channels]` tensor.
//	filter: Shape `[depth, rows, cols, in_channels, out_channels]`.
// `in_channels` must match between `input` and `filter`.
//	out_backprop: Backprop signal of shape `[batch, out_depth, out_rows, out_cols,
// out_channels]`.
//	strides: 1-D tensor of length 5. The stride of the sliding window for each
// dimension of `input`. Must have `strides[0] = strides[4] = 1`.
//	padding: The type of padding algorithm to use.
func Conv3DBackpropInputV2(scope *Scope, input_sizes tf.Output, filter tf.Output, out_backprop tf.Output, strides []int64, padding string, optional ...Conv3DBackpropInputV2Attr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"strides": strides, "padding": padding}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "Conv3DBackpropInputV2",
		Input: []tf.Input{
			input_sizes, filter, out_backprop,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Conv3DBackpropInputAttr is an optional argument to Conv3DBackpropInput.
type Conv3DBackpropInputAttr func(optionalAttr)

// Conv3DBackpropInputDilations sets the optional dilations attribute to value.
// If not specified, defaults to <i:1 i:1 i:1 i:1 i:1 >
func Conv3DBackpropInputDilations(value []int64) Conv3DBackpropInputAttr {
	return func(m optionalAttr) {
		m["dilations"] = value
	}
}

// Computes the gradients of 3-D convolution with respect to the input.
//
// DEPRECATED at GraphDef version 10: Use Conv3DBackpropInputV2
//
// Arguments:
//	input: Shape `[batch, depth, rows, cols, in_channels]`.
//	filter: Shape `[depth, rows, cols, in_channels, out_channels]`.
// `in_channels` must match between `input` and `filter`.
//	out_backprop: Backprop signal of shape `[batch, out_depth, out_rows, out_cols,
// out_channels]`.
//	strides: 1-D tensor of length 5. The stride of the sliding window for each
// dimension of `input`. Must have `strides[0] = strides[4] = 1`.
//	padding: The type of padding algorithm to use.
func Conv3DBackpropInput(scope *Scope, input tf.Output, filter tf.Output, out_backprop tf.Output, strides []int64, padding string, optional ...Conv3DBackpropInputAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"strides": strides, "padding": padding}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "Conv3DBackpropInput",
		Input: []tf.Input{
			input, filter, out_backprop,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// DepthwiseConv2dNativeAttr is an optional argument to DepthwiseConv2dNative.
type DepthwiseConv2dNativeAttr func(optionalAttr)

// DepthwiseConv2dNativeDataFormat sets the optional data_format attribute to value.
//
// value: Specify the data format of the input and output data. With the
// default format "NHWC", the data is stored in the order of:
//     [batch, height, width, channels].
// Alternatively, the format could be "NCHW", the data storage order of:
//     [batch, channels, height, width].
// If not specified, defaults to "NHWC"
func DepthwiseConv2dNativeDataFormat(value string) DepthwiseConv2dNativeAttr {
	return func(m optionalAttr) {
		m["data_format"] = value
	}
}

// DepthwiseConv2dNativeDilations sets the optional dilations attribute to value.
//
// value: 1-D tensor of length 4.  The dilation factor for each dimension of
// `input`. If set to k > 1, there will be k-1 skipped cells between each filter
// element on that dimension. The dimension order is determined by the value of
// `data_format`, see above for details. Dilations in the batch and depth
// dimensions must be 1.
// If not specified, defaults to <i:1 i:1 i:1 i:1 >
func DepthwiseConv2dNativeDilations(value []int64) DepthwiseConv2dNativeAttr {
	return func(m optionalAttr) {
		m["dilations"] = value
	}
}

// Computes a 2-D depthwise convolution given 4-D `input` and `filter` tensors.
//
// Given an input tensor of shape `[batch, in_height, in_width, in_channels]`
// and a filter / kernel tensor of shape
// `[filter_height, filter_width, in_channels, channel_multiplier]`, containing
// `in_channels` convolutional filters of depth 1, `depthwise_conv2d` applies
// a different filter to each input channel (expanding from 1 channel to
// `channel_multiplier` channels for each), then concatenates the results
// together. Thus, the output has `in_channels * channel_multiplier` channels.
//
// ```
// for k in 0..in_channels-1
//   for q in 0..channel_multiplier-1
//     output[b, i, j, k * channel_multiplier + q] =
//       sum_{di, dj} input[b, strides[1] * i + di, strides[2] * j + dj, k] *
//                         filter[di, dj, k, q]
// ```
//
// Must have `strides[0] = strides[3] = 1`.  For the most common case of the same
// horizontal and vertical strides, `strides = [1, stride, stride, 1]`.
//
// Arguments:
//
//
//	strides: 1-D of length 4.  The stride of the sliding window for each dimension
// of `input`.
//	padding: The type of padding algorithm to use.
func DepthwiseConv2dNative(scope *Scope, input tf.Output, filter tf.Output, strides []int64, padding string, optional ...DepthwiseConv2dNativeAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"strides": strides, "padding": padding}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "DepthwiseConv2dNative",
		Input: []tf.Input{
			input, filter,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// MaxPoolGradAttr is an optional argument to MaxPoolGrad.
type MaxPoolGradAttr func(optionalAttr)

// MaxPoolGradDataFormat sets the optional data_format attribute to value.
//
// value: Specify the data format of the input and output data. With the
// default format "NHWC", the data is stored in the order of:
//     [batch, in_height, in_width, in_channels].
// Alternatively, the format could be "NCHW", the data storage order of:
//     [batch, in_channels, in_height, in_width].
// If not specified, defaults to "NHWC"
func MaxPoolGradDataFormat(value string) MaxPoolGradAttr {
	return func(m optionalAttr) {
		m["data_format"] = value
	}
}

// Computes gradients of the maxpooling function.
//
// Arguments:
//	orig_input: The original input tensor.
//	orig_output: The original output tensor.
//	grad: 4-D.  Gradients w.r.t. the output of `max_pool`.
//	ksize: The size of the window for each dimension of the input tensor.
//	strides: The stride of the sliding window for each dimension of the
// input tensor.
//	padding: The type of padding algorithm to use.
//
// Returns Gradients w.r.t. the input to `max_pool`.
func MaxPoolGrad(scope *Scope, orig_input tf.Output, orig_output tf.Output, grad tf.Output, ksize []int64, strides []int64, padding string, optional ...MaxPoolGradAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "MaxPoolGrad",
		Input: []tf.Input{
			orig_input, orig_output, grad,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// CropAndResizeAttr is an optional argument to CropAndResize.
type CropAndResizeAttr func(optionalAttr)

// CropAndResizeMethod sets the optional method attribute to value.
//
// value: A string specifying the sampling method for resizing. It can be either
// `"bilinear"` or `"nearest"` and defaults to `"bilinear"`. Currently two sampling
// methods are supported: Bilinear and Nearest Neighbor.
// If not specified, defaults to "bilinear"
func CropAndResizeMethod(value string) CropAndResizeAttr {
	return func(m optionalAttr) {
		m["method"] = value
	}
}

// CropAndResizeExtrapolationValue sets the optional extrapolation_value attribute to value.
//
// value: Value used for extrapolation, when applicable.
// If not specified, defaults to 0
func CropAndResizeExtrapolationValue(value float32) CropAndResizeAttr {
	return func(m optionalAttr) {
		m["extrapolation_value"] = value
	}
}

// Extracts crops from the input image tensor and resizes them.
//
// Extracts crops from the input image tensor and resizes them using bilinear
// sampling or nearest neighbor sampling (possibly with aspect ratio change) to a
// common output size specified by `crop_size`. This is more general than the
// `crop_to_bounding_box` op which extracts a fixed size slice from the input image
// and does not allow resizing or aspect ratio change.
//
// Returns a tensor with `crops` from the input `image` at positions defined at the
// bounding box locations in `boxes`. The cropped boxes are all resized (with
// bilinear or nearest neighbor interpolation) to a fixed
// `size = [crop_height, crop_width]`. The result is a 4-D tensor
// `[num_boxes, crop_height, crop_width, depth]`. The resizing is corner aligned.
// In particular, if `boxes = [[0, 0, 1, 1]]`, the method will give identical
// results to using `tf.image.resize_bilinear()` or
// `tf.image.resize_nearest_neighbor()` (depending on the `method` argument) with
// `align_corners=True`.
//
// Arguments:
//	image: A 4-D tensor of shape `[batch, image_height, image_width, depth]`.
// Both `image_height` and `image_width` need to be positive.
//	boxes: A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor
// specifies the coordinates of a box in the `box_ind[i]` image and is specified
// in normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of
// `y` is mapped to the image coordinate at `y * (image_height - 1)`, so that the
// `[0, 1]` interval of normalized image height is mapped to
// `[0, image_height - 1]` in image height coordinates. We do allow `y1` > `y2`, in
// which case the sampled crop is an up-down flipped version of the original
// image. The width dimension is treated similarly. Normalized coordinates
// outside the `[0, 1]` range are allowed, in which case we use
// `extrapolation_value` to extrapolate the input image values.
//	box_ind: A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`.
// The value of `box_ind[i]` specifies the image that the `i`-th box refers to.
//	crop_size: A 1-D tensor of 2 elements, `size = [crop_height, crop_width]`. All
// cropped image patches are resized to this size. The aspect ratio of the image
// content is not preserved. Both `crop_height` and `crop_width` need to be
// positive.
//
// Returns A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`.
func CropAndResize(scope *Scope, image tf.Output, boxes tf.Output, box_ind tf.Output, crop_size tf.Output, optional ...CropAndResizeAttr) (crops tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "CropAndResize",
		Input: []tf.Input{
			image, boxes, box_ind, crop_size,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
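
// A hedged usage sketch (editorial, not generated code): one normalized box
// covering the whole first image is cropped and resized to 24x24 via the
// optional method attribute. NewScope, SubScope, Placeholder, and Const are
// assumed from elsewhere in this package; the function name is hypothetical.
func exampleCropAndResize() tf.Output {
	s := NewScope()
	// image: [batch, image_height, image_width, depth].
	image := Placeholder(s.SubScope("image"), tf.Float)
	// One box in normalized [y1, x1, y2, x2] coordinates.
	boxes := Const(s.SubScope("boxes"), [][]float32{{0, 0, 1, 1}})
	// The box above refers to image 0 in the batch.
	boxInd := Const(s.SubScope("box_ind"), []int32{0})
	cropSize := Const(s.SubScope("crop_size"), []int32{24, 24})
	return CropAndResize(s, image, boxes, boxInd, cropSize,
		CropAndResizeMethod("bilinear"))
}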

// Conv2DBackpropFilterAttr is an optional argument to Conv2DBackpropFilter.
type Conv2DBackpropFilterAttr func(optionalAttr)

// Conv2DBackpropFilterUseCudnnOnGpu sets the optional use_cudnn_on_gpu attribute to value.
// If not specified, defaults to true
func Conv2DBackpropFilterUseCudnnOnGpu(value bool) Conv2DBackpropFilterAttr {
	return func(m optionalAttr) {
		m["use_cudnn_on_gpu"] = value
	}
}

// Conv2DBackpropFilterExplicitPaddings sets the optional explicit_paddings attribute to value.
//
// value: If `padding` is `"EXPLICIT"`, the list of explicit padding amounts. For the ith
// dimension, the amount of padding inserted before and after the dimension is
// `explicit_paddings[2 * i]` and `explicit_paddings[2 * i + 1]`, respectively. If
// `padding` is not `"EXPLICIT"`, `explicit_paddings` must be empty.
// If not specified, defaults to the empty list
func Conv2DBackpropFilterExplicitPaddings(value []int64) Conv2DBackpropFilterAttr {
	return func(m optionalAttr) {
		m["explicit_paddings"] = value
	}
}

// Conv2DBackpropFilterDataFormat sets the optional data_format attribute to value.
//
// value: Specify the data format of the input and output data. With the
// default format "NHWC", the data is stored in the order of:
//     [batch, in_height, in_width, in_channels].
// Alternatively, the format could be "NCHW", the data storage order of:
//     [batch, in_channels, in_height, in_width].
// If not specified, defaults to "NHWC"
func Conv2DBackpropFilterDataFormat(value string) Conv2DBackpropFilterAttr {
	return func(m optionalAttr) {
		m["data_format"] = value
	}
}

// Conv2DBackpropFilterDilations sets the optional dilations attribute to value.
//
// value: 1-D tensor of length 4.  The dilation factor for each dimension of
// `input`. If set to k > 1, there will be k-1 skipped cells between each filter
// element on that dimension. The dimension order is determined by the value of
// `data_format`, see above for details. Dilations in the batch and depth
// dimensions must be 1.
// If not specified, defaults to [1, 1, 1, 1]
func Conv2DBackpropFilterDilations(value []int64) Conv2DBackpropFilterAttr {
	return func(m optionalAttr) {
		m["dilations"] = value
	}
}

// Computes the gradients of convolution with respect to the filter.
//
// Arguments:
//	input: 4-D with shape `[batch, in_height, in_width, in_channels]`.
//	filter_sizes: An integer vector representing the tensor shape of `filter`,
// where `filter` is a 4-D
// `[filter_height, filter_width, in_channels, out_channels]` tensor.
//	out_backprop: 4-D with shape `[batch, out_height, out_width, out_channels]`.
// Gradients w.r.t. the output of the convolution.
//	strides: The stride of the sliding window for each dimension of the input
// of the convolution. Must be in the same order as the dimension specified with
// `data_format`.
//	padding: The type of padding algorithm to use.
//
// Returns 4-D with shape
// `[filter_height, filter_width, in_channels, out_channels]`.  Gradient w.r.t.
// the `filter` input of the convolution.
func Conv2DBackpropFilter(scope *Scope, input tf.Output, filter_sizes tf.Output, out_backprop tf.Output, strides []int64, padding string, optional ...Conv2DBackpropFilterAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"strides": strides, "padding": padding}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "Conv2DBackpropFilter",
		Input: []tf.Input{
			input, filter_sizes, out_backprop,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Computes Psi, the derivative of Lgamma.
//
// That is, this op computes the derivative of the log of the absolute value
// of `Gamma(x)`, element-wise.
func Digamma(scope *Scope, x tf.Output) (y tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Digamma",
		Input: []tf.Input{
			x,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Returns the number of work units this Reader has finished processing.
//
// Arguments:
//	reader_handle: Handle to a Reader.
func ReaderNumWorkUnitsCompletedV2(scope *Scope, reader_handle tf.Output) (units_completed tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "ReaderNumWorkUnitsCompletedV2",
		Input: []tf.Input{
			reader_handle,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Conv2DAttr is an optional argument to Conv2D.
type Conv2DAttr func(optionalAttr)

// Conv2DUseCudnnOnGpu sets the optional use_cudnn_on_gpu attribute to value.
// If not specified, defaults to true
func Conv2DUseCudnnOnGpu(value bool) Conv2DAttr {
	return func(m optionalAttr) {
		m["use_cudnn_on_gpu"] = value
	}
}

// Conv2DExplicitPaddings sets the optional explicit_paddings attribute to value.
//
// value: If `padding` is `"EXPLICIT"`, the list of explicit padding amounts. For the ith
// dimension, the amount of padding inserted before and after the dimension is
// `explicit_paddings[2 * i]` and `explicit_paddings[2 * i + 1]`, respectively. If
// `padding` is not `"EXPLICIT"`, `explicit_paddings` must be empty.
// If not specified, defaults to the empty list
func Conv2DExplicitPaddings(value []int64) Conv2DAttr {
	return func(m optionalAttr) {
		m["explicit_paddings"] = value
	}
}

// Conv2DDataFormat sets the optional data_format attribute to value.
//
// value: Specify the data format of the input and output data. With the
// default format "NHWC", the data is stored in the order of:
//     [batch, height, width, channels].
// Alternatively, the format could be "NCHW", the data storage order of:
//     [batch, channels, height, width].
// If not specified, defaults to "NHWC"
func Conv2DDataFormat(value string) Conv2DAttr {
	return func(m optionalAttr) {
		m["data_format"] = value
	}
}

// Conv2DDilations sets the optional dilations attribute to value.
//
// value: 1-D tensor of length 4.  The dilation factor for each dimension of
// `input`. If set to k > 1, there will be k-1 skipped cells between each
// filter element on that dimension. The dimension order is determined by the
// value of `data_format`, see above for details. Dilations in the batch and
// depth dimensions must be 1.
// If not specified, defaults to [1, 1, 1, 1]
func Conv2DDilations(value []int64) Conv2DAttr {
	return func(m optionalAttr) {
		m["dilations"] = value
	}
}

// Computes a 2-D convolution given 4-D `input` and `filter` tensors.
//
// Given an input tensor of shape `[batch, in_height, in_width, in_channels]`
// and a filter / kernel tensor of shape
// `[filter_height, filter_width, in_channels, out_channels]`, this op
// performs the following:
//
// 1. Flattens the filter to a 2-D matrix with shape
//    `[filter_height * filter_width * in_channels, output_channels]`.
// 2. Extracts image patches from the input tensor to form a *virtual*
//    tensor of shape `[batch, out_height, out_width,
//    filter_height * filter_width * in_channels]`.
// 3. For each patch, right-multiplies the filter matrix and the image patch
//    vector.
//
// In detail, with the default NHWC format,
//
//     output[b, i, j, k] =
//         sum_{di, dj, q} input[b, strides[1] * i + di, strides[2] * j + dj, q] *
//                         filter[di, dj, q, k]
//
// Must have `strides[0] = strides[3] = 1`.  For the most common case of the same
// horizontal and vertical strides, `strides = [1, stride, stride, 1]`.
//
// Arguments:
//	input: A 4-D tensor. The dimension order is interpreted according to the value
// of `data_format`, see below for details.
//	filter: A 4-D tensor of shape
// `[filter_height, filter_width, in_channels, out_channels]`
//	strides: 1-D tensor of length 4.  The stride of the sliding window for each
// dimension of `input`. The dimension order is determined by the value of
// `data_format`, see below for details.
//	padding: The type of padding algorithm to use.
//
// Returns A 4-D tensor. The dimension order is determined by the value of
// `data_format`, see below for details.
func Conv2D(scope *Scope, input tf.Output, filter tf.Output, strides []int64, padding string, optional ...Conv2DAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"strides": strides, "padding": padding}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "Conv2D",
		Input: []tf.Input{
			input, filter,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
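
// An editorial sketch, not generated code: it shows the Conv2D wrapper with
// its functional options, matching the NHWC stride convention described
// above. NewScope, SubScope, and Placeholder are assumed from elsewhere in
// this package; the function name is hypothetical.
func exampleConv2D() tf.Output {
	s := NewScope()
	// input: [batch, in_height, in_width, in_channels] under "NHWC".
	input := Placeholder(s.SubScope("input"), tf.Float)
	// filter: [filter_height, filter_width, in_channels, out_channels].
	filter := Placeholder(s.SubScope("filter"), tf.Float)
	// strides[0] and strides[3] must be 1; stride 2 in height and width.
	return Conv2D(s, input, filter, []int64{1, 2, 2, 1}, "SAME",
		Conv2DDataFormat("NHWC"))
}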

// Fills empty rows in the input 2-D `SparseTensor` with a default value.
//
// The input `SparseTensor` is represented via the tuple of inputs
// (`indices`, `values`, `dense_shape`).  The output `SparseTensor` has the
// same `dense_shape` but with indices `output_indices` and values
// `output_values`.
//
// This op inserts a single entry for every row that doesn't have any values.
// The index is created as `[row, 0, ..., 0]` and the inserted value
// is `default_value`.
//
// For example, suppose `sp_input` has shape `[5, 6]` and non-empty values:
//
//     [0, 1]: a
//     [0, 3]: b
//     [2, 0]: c
//     [3, 1]: d
//
// Rows 1 and 4 are empty, so the output will be of shape `[5, 6]` with values:
//
//     [0, 1]: a
//     [0, 3]: b
//     [1, 0]: default_value
//     [2, 0]: c
//     [3, 1]: d
//     [4, 0]: default_value
//
// The output `SparseTensor` will be in row-major order and will have the
// same shape as the input.
//
// This op also returns an indicator vector shaped `[dense_shape[0]]` such that
//
//     empty_row_indicator[i] = True iff row i was an empty row.
//
// And a reverse index map vector shaped `[indices.shape[0]]` that is used during
// backpropagation,
//
//     reverse_index_map[j] = out_j s.t. indices[j, :] == output_indices[out_j, :]
//
// Arguments:
//	indices: 2-D. the indices of the sparse tensor.
//	values: 1-D. the values of the sparse tensor.
//	dense_shape: 1-D. the shape of the sparse tensor.
//	default_value: 0-D. default value to insert into location `[row, 0, ..., 0]`
//   for rows missing from the input sparse tensor.
//
// Returns:
//	output_indices: 2-D. the indices of the filled sparse tensor.
//	output_values: 1-D. the values of the filled sparse tensor.
//	empty_row_indicator: 1-D. whether the dense row was missing in the
// input sparse tensor.
//	reverse_index_map: 1-D. a map from the input indices to the output indices.
func SparseFillEmptyRows(scope *Scope, indices tf.Output, values tf.Output, dense_shape tf.Output, default_value tf.Output) (output_indices tf.Output, output_values tf.Output, empty_row_indicator tf.Output, reverse_index_map tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "SparseFillEmptyRows",
		Input: []tf.Input{
			indices, values, dense_shape, default_value,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2), op.Output(3)
}
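
// An editorial sketch mirroring the [5, 6] example above (not generated
// code): rows 1 and 4 are empty, so each receives default_value at column 0.
// NewScope, SubScope, and Const are assumed from elsewhere in this package.
func exampleSparseFillEmptyRows() (tf.Output, tf.Output) {
	s := NewScope()
	indices := Const(s.SubScope("indices"), [][]int64{{0, 1}, {0, 3}, {2, 0}, {3, 1}})
	values := Const(s.SubScope("values"), []string{"a", "b", "c", "d"})
	denseShape := Const(s.SubScope("dense_shape"), []int64{5, 6})
	defaultValue := Const(s.SubScope("default_value"), "default_value")
	outIndices, outValues, _, _ := SparseFillEmptyRows(s, indices, values, denseShape, defaultValue)
	return outIndices, outValues
}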

// LoadTPUEmbeddingADAMParametersGradAccumDebugAttr is an optional argument to LoadTPUEmbeddingADAMParametersGradAccumDebug.
type LoadTPUEmbeddingADAMParametersGradAccumDebugAttr func(optionalAttr)

// LoadTPUEmbeddingADAMParametersGradAccumDebugTableId sets the optional table_id attribute to value.
// If not specified, defaults to -1
//
// REQUIRES: value >= -1
func LoadTPUEmbeddingADAMParametersGradAccumDebugTableId(value int64) LoadTPUEmbeddingADAMParametersGradAccumDebugAttr {
	return func(m optionalAttr) {
		m["table_id"] = value
	}
}

// LoadTPUEmbeddingADAMParametersGradAccumDebugTableName sets the optional table_name attribute to value.
// If not specified, defaults to ""
func LoadTPUEmbeddingADAMParametersGradAccumDebugTableName(value string) LoadTPUEmbeddingADAMParametersGradAccumDebugAttr {
	return func(m optionalAttr) {
		m["table_name"] = value
	}
}

// Load ADAM embedding parameters with debug support.
//
// An op that loads optimization parameters into HBM for embedding. Must be
// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
// embedding table configuration. For example, this op is used to install
// parameters that are loaded from a checkpoint before a training loop is
// executed.
//
// Arguments:
//	parameters: Value of parameters used in the ADAM optimization algorithm.
//	momenta: Value of momenta used in the ADAM optimization algorithm.
//	velocities: Value of velocities used in the ADAM optimization algorithm.
//	gradient_accumulators: Value of gradient_accumulators used in the ADAM optimization algorithm.
//
//
//
// Returns the created operation.
func LoadTPUEmbeddingADAMParametersGradAccumDebug(scope *Scope, parameters tf.Output, momenta tf.Output, velocities tf.Output, gradient_accumulators tf.Output, num_shards int64, shard_id int64, optional ...LoadTPUEmbeddingADAMParametersGradAccumDebugAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "LoadTPUEmbeddingADAMParametersGradAccumDebug",
		Input: []tf.Input{
			parameters, momenta, velocities, gradient_accumulators,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}

// BiasAddAttr is an optional argument to BiasAdd.
type BiasAddAttr func(optionalAttr)

// BiasAddDataFormat sets the optional data_format attribute to value.
//
// value: Specify the data format of the input and output data. With the
// default format "NHWC", the bias tensor will be added to the last dimension
// of the value tensor.
// Alternatively, the format could be "NCHW", the data storage order of:
//     [batch, in_channels, in_height, in_width].
// The tensor will be added to "in_channels", the third-to-the-last
// dimension.
// If not specified, defaults to "NHWC"
func BiasAddDataFormat(value string) BiasAddAttr {
	return func(m optionalAttr) {
		m["data_format"] = value
	}
}

// Adds `bias` to `value`.
//
// This is a special case of `tf.add` where `bias` is restricted to be 1-D.
// Broadcasting is supported, so `value` may have any number of dimensions.
//
// Arguments:
//	value: Any number of dimensions.
//	bias: 1-D with size the last dimension of `value`.
//
// Returns Broadcasted sum of `value` and `bias`.
func BiasAdd(scope *Scope, value tf.Output, bias tf.Output, optional ...BiasAddAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "BiasAdd",
		Input: []tf.Input{
			value, bias,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
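
// An editorial sketch (not generated code): bias is 1-D with size equal to
// the last dimension of value under the default "NHWC" format. NewScope,
// SubScope, Placeholder, and Const are assumed from elsewhere in this package.
func exampleBiasAdd() tf.Output {
	s := NewScope()
	// value: any shape whose last dimension is 3.
	value := Placeholder(s.SubScope("value"), tf.Float)
	bias := Const(s.SubScope("bias"), []float32{0.1, 0.2, 0.3})
	return BiasAdd(s, value, bias)
}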

// SparseReduceSumSparseAttr is an optional argument to SparseReduceSumSparse.
type SparseReduceSumSparseAttr func(optionalAttr)

// SparseReduceSumSparseKeepDims sets the optional keep_dims attribute to value.
//
// value: If true, retain reduced dimensions with length 1.
// If not specified, defaults to false
func SparseReduceSumSparseKeepDims(value bool) SparseReduceSumSparseAttr {
	return func(m optionalAttr) {
		m["keep_dims"] = value
	}
}

// Computes the sum of elements across dimensions of a SparseTensor.
//
// This Op takes a SparseTensor and is the sparse counterpart to
// `tf.reduce_sum()`.  In contrast to SparseReduceSum, this Op returns a
// SparseTensor.
//
// Reduces `sp_input` along the dimensions given in `reduction_axes`.  Unless
// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
// `reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained
// with length 1.
//
// If `reduction_axes` has no entries, all dimensions are reduced, and a tensor
// with a single element is returned.  Additionally, the axes can be negative,
// which are interpreted according to the indexing rules in Python.
//
// Arguments:
//	input_indices: 2-D.  `N x R` matrix with the indices of non-empty values in a
// SparseTensor, possibly not in canonical ordering.
//	input_values: 1-D.  `N` non-empty values corresponding to `input_indices`.
//	input_shape: 1-D.  Shape of the input SparseTensor.
//	reduction_axes: 1-D.  Length-`K` vector containing the reduction axes.
func SparseReduceSumSparse(scope *Scope, input_indices tf.Output, input_values tf.Output, input_shape tf.Output, reduction_axes tf.Output, optional ...SparseReduceSumSparseAttr) (output_indices tf.Output, output_values tf.Output, output_shape tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "SparseReduceSumSparse",
		Input: []tf.Input{
			input_indices, input_values, input_shape, reduction_axes,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}
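
// An editorial sketch (not generated code): a 2x3 SparseTensor is reduced
// along axis 1, returning the (indices, values, shape) triple of a
// SparseTensor rather than a dense Tensor. NewScope, SubScope, and Const are
// assumed from elsewhere in this package.
func exampleSparseReduceSumSparse() (tf.Output, tf.Output, tf.Output) {
	s := NewScope()
	indices := Const(s.SubScope("indices"), [][]int64{{0, 0}, {1, 2}})
	values := Const(s.SubScope("values"), []float32{3, 4})
	shape := Const(s.SubScope("shape"), []int64{2, 3})
	axes := Const(s.SubScope("axes"), []int32{1})
	return SparseReduceSumSparse(s, indices, values, shape, axes,
		SparseReduceSumSparseKeepDims(true))
}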

// LoadTPUEmbeddingStochasticGradientDescentParametersAttr is an optional argument to LoadTPUEmbeddingStochasticGradientDescentParameters.
type LoadTPUEmbeddingStochasticGradientDescentParametersAttr func(optionalAttr)

// LoadTPUEmbeddingStochasticGradientDescentParametersTableId sets the optional table_id attribute to value.
// If not specified, defaults to -1
//
// REQUIRES: value >= -1
func LoadTPUEmbeddingStochasticGradientDescentParametersTableId(value int64) LoadTPUEmbeddingStochasticGradientDescentParametersAttr {
	return func(m optionalAttr) {
		m["table_id"] = value
	}
}

// LoadTPUEmbeddingStochasticGradientDescentParametersTableName sets the optional table_name attribute to value.
// If not specified, defaults to ""
func LoadTPUEmbeddingStochasticGradientDescentParametersTableName(value string) LoadTPUEmbeddingStochasticGradientDescentParametersAttr {
	return func(m optionalAttr) {
		m["table_name"] = value
	}
}

// Load SGD embedding parameters.
//
// An op that loads optimization parameters into HBM for embedding. Must be
// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
// embedding table configuration. For example, this op is used to install
// parameters that are loaded from a checkpoint before a training loop is
// executed.
//
// Arguments:
//	parameters: Value of parameters used in the stochastic gradient descent optimization algorithm.
//
//
//
// Returns the created operation.
func LoadTPUEmbeddingStochasticGradientDescentParameters(scope *Scope, parameters tf.Output, num_shards int64, shard_id int64, optional ...LoadTPUEmbeddingStochasticGradientDescentParametersAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "LoadTPUEmbeddingStochasticGradientDescentParameters",
		Input: []tf.Input{
			parameters,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}

// Selects the k nearest centers for each point.
//
// Rows of points are assumed to be input points. Rows of centers are assumed to be
// the list of candidate centers. For each point, the k centers that have least L2
// distance to it are computed.
//
// Arguments:
//	points: Matrix of shape (n, d). Rows are assumed to be input points.
//	centers: Matrix of shape (m, d). Rows are assumed to be centers.
//	k: Number of nearest centers to return for each point. If k is larger than m, then
// only m centers are returned.
//
// Returns:
//	nearest_center_indices: Matrix of shape (n, min(m, k)). Each row contains the
// indices of the centers closest to the corresponding point, ordered by
// increasing distance.
//	nearest_center_distances: Matrix of shape (n, min(m, k)). Each row contains the
// squared L2 distance to the corresponding center in nearest_center_indices.
func NearestNeighbors(scope *Scope, points tf.Output, centers tf.Output, k tf.Output) (nearest_center_indices tf.Output, nearest_center_distances tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "NearestNeighbors",
		Input: []tf.Input{
			points, centers, k,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1)
}
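
// An editorial sketch (not generated code): for each of three 2-D points,
// find the single nearest of two candidate centers. NewScope, SubScope, and
// Const are assumed from elsewhere in this package.
func exampleNearestNeighbors() (tf.Output, tf.Output) {
	s := NewScope()
	points := Const(s.SubScope("points"), [][]float32{{0, 0}, {5, 5}, {1, 1}})
	centers := Const(s.SubScope("centers"), [][]float32{{0, 0}, {4, 4}})
	k := Const(s.SubScope("k"), int64(1)) // k is a scalar int64 tensor
	return NearestNeighbors(s, points, centers, k)
}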

// Returns x * y element-wise.
//
// *NOTE*: `Multiply` supports broadcasting. More about broadcasting
// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
func Mul(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Mul",
		Input: []tf.Input{
			x, y,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
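
// An editorial sketch (not generated code) of the broadcasting noted above:
// a scalar multiplies every element of a vector. NewScope, SubScope, and
// Const are assumed from elsewhere in this package.
func exampleMul() tf.Output {
	s := NewScope()
	x := Const(s.SubScope("x"), []float32{1, 2, 3})
	y := Const(s.SubScope("y"), float32(10)) // broadcast against x
	return Mul(s, x, y)                      // => [10 20 30]
}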

// FusedBatchNormV2Attr is an optional argument to FusedBatchNormV2.
type FusedBatchNormV2Attr func(optionalAttr)

// FusedBatchNormV2Epsilon sets the optional epsilon attribute to value.
//
// value: A small float number added to the variance of x.
// If not specified, defaults to 0.0001
func FusedBatchNormV2Epsilon(value float32) FusedBatchNormV2Attr {
	return func(m optionalAttr) {
		m["epsilon"] = value
	}
}

// FusedBatchNormV2DataFormat sets the optional data_format attribute to value.
//
// value: The data format for x and y. Either "NHWC" (default) or "NCHW".
// If not specified, defaults to "NHWC"
func FusedBatchNormV2DataFormat(value string) FusedBatchNormV2Attr {
	return func(m optionalAttr) {
		m["data_format"] = value
	}
}

// FusedBatchNormV2IsTraining sets the optional is_training attribute to value.
//
// value: A bool value to indicate the operation is for training (default)
// or inference.
// If not specified, defaults to true
func FusedBatchNormV2IsTraining(value bool) FusedBatchNormV2Attr {
	return func(m optionalAttr) {
		m["is_training"] = value
	}
}

// Batch normalization.
//
// Note that the size of 4D Tensors is defined by either "NHWC" or "NCHW".
// The size of 1D Tensors matches the dimension C of the 4D Tensors.
//
// Arguments:
//	x: A 4D Tensor for input data.
//	scale: A 1D Tensor for scaling factor, to scale the normalized x.
//	offset: A 1D Tensor for offset, to shift to the normalized x.
//	mean: A 1D Tensor for population mean. Used for inference only;
// must be empty for training.
//	variance: A 1D Tensor for population variance. Used for inference only;
// must be empty for training.
//
// Returns:
//	y: A 4D Tensor for output data.
//	batch_mean: A 1D Tensor for the computed batch mean, to be used by TensorFlow
// to compute the running mean.
//	batch_variance: A 1D Tensor for the computed batch variance, to be used by
// TensorFlow to compute the running variance.
//	reserve_space_1: A 1D Tensor for the computed batch mean, to be reused
// in the gradient computation.
//	reserve_space_2: A 1D Tensor for the computed batch variance (inverted variance
// in the cuDNN case), to be reused in the gradient computation.
func FusedBatchNormV2(scope *Scope, x tf.Output, scale tf.Output, offset tf.Output, mean tf.Output, variance tf.Output, optional ...FusedBatchNormV2Attr) (y tf.Output, batch_mean tf.Output, batch_variance tf.Output, reserve_space_1 tf.Output, reserve_space_2 tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "FusedBatchNormV2",
		Input: []tf.Input{
			x, scale, offset, mean, variance,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2), op.Output(3), op.Output(4)
}
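
// An editorial sketch (not generated code): inference-mode batch norm, where
// the population mean and variance are supplied and is_training is false.
// NewScope, SubScope, and Placeholder are assumed from elsewhere in this
// package.
func exampleFusedBatchNormV2() tf.Output {
	s := NewScope()
	x := Placeholder(s.SubScope("x"), tf.Float) // 4D, "NHWC"
	scale := Placeholder(s.SubScope("scale"), tf.Float)
	offset := Placeholder(s.SubScope("offset"), tf.Float)
	mean := Placeholder(s.SubScope("mean"), tf.Float)
	variance := Placeholder(s.SubScope("variance"), tf.Float)
	y, _, _, _, _ := FusedBatchNormV2(s, x, scale, offset, mean, variance,
		FusedBatchNormV2IsTraining(false),
		FusedBatchNormV2Epsilon(0.001))
	return y
}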

// Reverses specific dimensions of a tensor.
//
// NOTE `tf.reverse` has now changed behavior in preparation for 1.0.
// `tf.reverse_v2` is currently an alias that will be deprecated before TF 1.0.
//
// Given a `tensor` and an `int32` tensor `axis` representing the set of
// dimensions of `tensor` to reverse, this operation reverses each dimension
// `i` for which there exists `j` s.t. `axis[j] == i`.
//
// `tensor` can have up to 8 dimensions. The number of dimensions specified
// in `axis` may be 0 or more. If an index is specified more than
// once, an InvalidArgument error is raised.
//
// For example:
//
// ```
// # tensor 't' is [[[[ 0,  1,  2,  3],
// #                  [ 4,  5,  6,  7],
// #                  [ 8,  9, 10, 11]],
// #                 [[12, 13, 14, 15],
// #                  [16, 17, 18, 19],
// #                  [20, 21, 22, 23]]]]
// # tensor 't' shape is [1, 2, 3, 4]
//
// # 'dims' is [3] or 'dims' is [-1]
// reverse(t, dims) ==> [[[[ 3,  2,  1,  0],
//                         [ 7,  6,  5,  4],
//                         [11, 10,  9,  8]],
//                        [[15, 14, 13, 12],
//                         [19, 18, 17, 16],
//                         [23, 22, 21, 20]]]]
//
// # 'dims' is '[1]' (or 'dims' is '[-3]')
// reverse(t, dims) ==> [[[[12, 13, 14, 15],
//                         [16, 17, 18, 19],
//                         [20, 21, 22, 23]],
//                        [[ 0,  1,  2,  3],
//                         [ 4,  5,  6,  7],
//                         [ 8,  9, 10, 11]]]]
//
// # 'dims' is '[2]' (or 'dims' is '[-2]')
// reverse(t, dims) ==> [[[[8, 9, 10, 11],
//                         [4, 5, 6, 7],
//                         [0, 1, 2, 3]],
//                        [[20, 21, 22, 23],
//                         [16, 17, 18, 19],
//                         [12, 13, 14, 15]]]]
// ```
//
// Arguments:
//	tensor: Up to 8-D.
//	axis: 1-D. The indices of the dimensions to reverse. Must be in the range
// `[-rank(tensor), rank(tensor))`.
//
// Returns The same shape as `tensor`.
func ReverseV2(scope *Scope, tensor tf.Output, axis tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "ReverseV2",
		Input: []tf.Input{
			tensor, axis,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
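
// An editorial sketch (not generated code) in the spirit of the example
// above: reversing a 2-D tensor along its last axis. NewScope, SubScope, and
// Const are assumed from elsewhere in this package.
func exampleReverseV2() tf.Output {
	s := NewScope()
	t := Const(s.SubScope("t"), [][]int32{{1, 2, 3}, {4, 5, 6}})
	axis := Const(s.SubScope("axis"), []int32{-1}) // last dimension
	return ReverseV2(s, t, axis)                   // => [[3 2 1] [6 5 4]]
}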

// Adds `bias` to `value`.
//
// This is a deprecated version of BiasAdd and will soon be removed.
//
// This is a special case of `tf.add` where `bias` is restricted to be 1-D.
// Broadcasting is supported, so `value` may have any number of dimensions.
//
// Arguments:
//	value: Any number of dimensions.
//	bias: 1-D with size the last dimension of `value`.
//
// Returns Broadcasted sum of `value` and `bias`.
func BiasAddV1(scope *Scope, value tf.Output, bias tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "BiasAddV1",
		Input: []tf.Input{
			value, bias,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Selects num_to_sample rows of input using the KMeans++ criterion.
//
// Rows of points are assumed to be input points. One row is selected at random.
// Subsequent rows are sampled with probability proportional to the squared L2
// distance from the nearest row selected thus far until num_to_sample rows have
// been sampled.
//
// Arguments:
//	points: Matrix of shape (n, d). Rows are assumed to be input points.
//	num_to_sample: Scalar. The number of rows to sample. This value must not be larger than n.
//	seed: Scalar. Seed for initializing the random number generator.
//	num_retries_per_sample: Scalar. For each row that is sampled, this parameter
// specifies the number of additional points to draw from the current
// distribution before selecting the best. If a negative value is specified, a
// heuristic is used to sample O(log(num_to_sample)) additional points.
//
// Returns Matrix of shape (num_to_sample, d). The sampled rows.
func KmeansPlusPlusInitialization(scope *Scope, points tf.Output, num_to_sample tf.Output, seed tf.Output, num_retries_per_sample tf.Output) (samples tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "KmeansPlusPlusInitialization",
		Input: []tf.Input{
			points, num_to_sample, seed, num_retries_per_sample,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Transforms a Tensor into a serialized TensorProto proto.
//
// Arguments:
//	tensor: A Tensor of type `T`.
//
// Returns A serialized TensorProto proto of the input tensor.
func SerializeTensor(scope *Scope, tensor tf.Output) (serialized tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "SerializeTensor",
		Input: []tf.Input{
			tensor,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// UnbatchGradAttr is an optional argument to UnbatchGrad.
type UnbatchGradAttr func(optionalAttr)

// UnbatchGradContainer sets the optional container attribute to value.
//
// value: Container to control resource sharing.
// If not specified, defaults to ""
func UnbatchGradContainer(value string) UnbatchGradAttr {
	return func(m optionalAttr) {
		m["container"] = value
	}
}

// UnbatchGradSharedName sets the optional shared_name attribute to value.
//
// value: Instances of UnbatchGrad with the same container and shared_name
// are assumed to possibly belong to the same batch. If left empty, the op name
// will be used as the shared name.
// If not specified, defaults to ""
func UnbatchGradSharedName(value string) UnbatchGradAttr {
	return func(m optionalAttr) {
		m["shared_name"] = value
	}
}

// Gradient of Unbatch.
//
// Acts like Batch but uses the given batch_index to index gradients as they
// become available. This ensures that the gradients are propagated back in the
// same session which did the forward pass.
//
// Arguments:
//	original_input: The input to the Unbatch operation this is the gradient of.
//	batch_index: The batch_index given to the Unbatch operation this is the gradient
// of.
//	grad: The downstream gradient.
//	id: The id scalar emitted by Batch.
//
// Returns Either an empty tensor or the batched gradient.
func UnbatchGrad(scope *Scope, original_input tf.Output, batch_index tf.Output, grad tf.Output, id tf.Output, optional ...UnbatchGradAttr) (batched_grad tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "UnbatchGrad",
		Input: []tf.Input{
			original_input, batch_index, grad, id,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// AvgPool3DGradAttr is an optional argument to AvgPool3DGrad.
type AvgPool3DGradAttr func(optionalAttr)

// AvgPool3DGradDataFormat sets the optional data_format attribute to value.
//
// value: The data format of the input and output data. With the
// default format "NDHWC", the data is stored in the order of:
//     [batch, in_depth, in_height, in_width, in_channels].
// Alternatively, the format could be "NCDHW", the data storage order is:
//     [batch, in_channels, in_depth, in_height, in_width].
// If not specified, defaults to "NDHWC"
func AvgPool3DGradDataFormat(value string) AvgPool3DGradAttr {
	return func(m optionalAttr) {
		m["data_format"] = value
	}
}

// Computes gradients of average pooling function.
//
// Arguments:
//	orig_input_shape: The original input dimensions.
//	grad: Output backprop of shape `[batch, depth, rows, cols, channels]`.
//	ksize: 1-D tensor of length 5. The size of the window for each dimension of
// the input tensor. Must have `ksize[0] = ksize[4] = 1`.
//	strides: 1-D tensor of length 5. The stride of the sliding window for each
// dimension of `input`. Must have `strides[0] = strides[4] = 1`.
//	padding: The type of padding algorithm to use.
//
// Returns The backprop for input.
func AvgPool3DGrad(scope *Scope, orig_input_shape tf.Output, grad tf.Output, ksize []int64, strides []int64, padding string, optional ...AvgPool3DGradAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "AvgPool3DGrad",
		Input: []tf.Input{
			orig_input_shape, grad,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// ParseSingleSequenceExampleAttr is an optional argument to ParseSingleSequenceExample.
type ParseSingleSequenceExampleAttr func(optionalAttr)

// ParseSingleSequenceExampleContextSparseTypes sets the optional context_sparse_types attribute to value.
//
// value: A list of Ncontext_sparse types; the data types of data in
// each context Feature given in context_sparse_keys.
// Currently the ParseSingleSequenceExample supports DT_FLOAT (FloatList),
// DT_INT64 (Int64List), and DT_STRING (BytesList).
// If not specified, defaults to the empty list
//
// REQUIRES: len(value) >= 0
func ParseSingleSequenceExampleContextSparseTypes(value []tf.DataType) ParseSingleSequenceExampleAttr {
	return func(m optionalAttr) {
		m["context_sparse_types"] = value
	}
}

// ParseSingleSequenceExampleFeatureListDenseTypes sets the optional feature_list_dense_types attribute to value.
// If not specified, defaults to the empty list
//
// REQUIRES: len(value) >= 0
func ParseSingleSequenceExampleFeatureListDenseTypes(value []tf.DataType) ParseSingleSequenceExampleAttr {
	return func(m optionalAttr) {
		m["feature_list_dense_types"] = value
	}
}

// ParseSingleSequenceExampleContextDenseShapes sets the optional context_dense_shapes attribute to value.
//
// value: A list of Ncontext_dense shapes; the shapes of data in
// each context Feature given in context_dense_keys.
// The number of elements in the Feature corresponding to context_dense_key[j]
// must always equal context_dense_shapes[j].NumEntries().
// The shape of context_dense_values[j] will match context_dense_shapes[j].
// If not specified, defaults to the empty list
//
// REQUIRES: len(value) >= 0
func ParseSingleSequenceExampleContextDenseShapes(value []tf.Shape) ParseSingleSequenceExampleAttr {
	return func(m optionalAttr) {
		m["context_dense_shapes"] = value
	}
}

// ParseSingleSequenceExampleFeatureListSparseTypes sets the optional feature_list_sparse_types attribute to value.
//
// value: A list of Nfeature_list_sparse types; the data types
// of data in each FeatureList given in feature_list_sparse_keys.
// Currently the ParseSingleSequenceExample supports DT_FLOAT (FloatList),
// DT_INT64 (Int64List), and DT_STRING (BytesList).
// If not specified, defaults to the empty list
//
// REQUIRES: len(value) >= 0
func ParseSingleSequenceExampleFeatureListSparseTypes(value []tf.DataType) ParseSingleSequenceExampleAttr {
	return func(m optionalAttr) {
		m["feature_list_sparse_types"] = value
	}
}

// ParseSingleSequenceExampleFeatureListDenseShapes sets the optional feature_list_dense_shapes attribute to value.
//
// value: A list of Nfeature_list_dense shapes; the shapes of
// data in each FeatureList given in feature_list_dense_keys.
// The shape of each Feature in the FeatureList corresponding to
// feature_list_dense_key[j] must always equal
// feature_list_dense_shapes[j].NumEntries().
// If not specified, defaults to the empty list
//
// REQUIRES: len(value) >= 0
func ParseSingleSequenceExampleFeatureListDenseShapes(value []tf.Shape) ParseSingleSequenceExampleAttr {
	return func(m optionalAttr) {
		m["feature_list_dense_shapes"] = value
	}
}

// Transforms a scalar brain.SequenceExample proto (as strings) into typed tensors.
//
// Arguments:
//	serialized: A scalar containing a binary serialized SequenceExample proto.
//	feature_list_dense_missing_assumed_empty: A vector listing the
// FeatureList keys which may be missing from the SequenceExample.  If the
// associated FeatureList is missing, it is treated as empty.  By default,
// any FeatureList not listed in this vector must exist in the SequenceExample.
//	context_sparse_keys: A list of Ncontext_sparse string Tensors (scalars).
// The keys expected in the Examples' features associated with context_sparse
// values.
//	context_dense_keys: A list of Ncontext_dense string Tensors (scalars).
// The keys expected in the SequenceExamples' context features associated with
// dense values.
//	feature_list_sparse_keys: A list of Nfeature_list_sparse string Tensors
// (scalars).  The keys expected in the FeatureLists associated with sparse
// values.
//	feature_list_dense_keys: A list of Nfeature_list_dense string Tensors (scalars).
// The keys expected in the SequenceExamples' feature_lists associated
// with lists of dense values.
//	context_dense_defaults: A list of Ncontext_dense Tensors (some may be empty).
// context_dense_defaults[j] provides default values
// when the SequenceExample's context map lacks context_dense_key[j].
// If an empty Tensor is provided for context_dense_defaults[j],
// then the Feature context_dense_keys[j] is required.
// The input type is inferred from context_dense_defaults[j], even when it's
// empty.  If context_dense_defaults[j] is not empty, its shape must match
// context_dense_shapes[j].
//	debug_name: A scalar containing the name of the serialized proto.
// May contain, for example, a table key (descriptive) name for the
// corresponding serialized proto.  This is purely useful for debugging
// purposes, and the presence of values here has no effect on the output.
// May also be an empty scalar if no name is available.
func ParseSingleSequenceExample(scope *Scope, serialized tf.Output, feature_list_dense_missing_assumed_empty tf.Output, context_sparse_keys []tf.Output, context_dense_keys []tf.Output, feature_list_sparse_keys []tf.Output, feature_list_dense_keys []tf.Output, context_dense_defaults []tf.Output, debug_name tf.Output, optional ...ParseSingleSequenceExampleAttr) (context_sparse_indices []tf.Output, context_sparse_values []tf.Output, context_sparse_shapes []tf.Output, context_dense_values []tf.Output, feature_list_sparse_indices []tf.Output, feature_list_sparse_values []tf.Output, feature_list_sparse_shapes []tf.Output, feature_list_dense_values []tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ParseSingleSequenceExample",
		Input: []tf.Input{
			serialized, feature_list_dense_missing_assumed_empty, tf.OutputList(context_sparse_keys), tf.OutputList(context_dense_keys), tf.OutputList(feature_list_sparse_keys), tf.OutputList(feature_list_dense_keys), tf.OutputList(context_dense_defaults), debug_name,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	if scope.Err() != nil {
		return
	}
	var idx int
	var err error
	if context_sparse_indices, idx, err = makeOutputList(op, idx, "context_sparse_indices"); err != nil {
		scope.UpdateErr("ParseSingleSequenceExample", err)
		return
	}
	if context_sparse_values, idx, err = makeOutputList(op, idx, "context_sparse_values"); err != nil {
		scope.UpdateErr("ParseSingleSequenceExample", err)
		return
	}
	if context_sparse_shapes, idx, err = makeOutputList(op, idx, "context_sparse_shapes"); err != nil {
		scope.UpdateErr("ParseSingleSequenceExample", err)
		return
	}
	if context_dense_values, idx, err = makeOutputList(op, idx, "context_dense_values"); err != nil {
		scope.UpdateErr("ParseSingleSequenceExample", err)
		return
	}
	if feature_list_sparse_indices, idx, err = makeOutputList(op, idx, "feature_list_sparse_indices"); err != nil {
		scope.UpdateErr("ParseSingleSequenceExample", err)
		return
	}
	if feature_list_sparse_values, idx, err = makeOutputList(op, idx, "feature_list_sparse_values"); err != nil {
		scope.UpdateErr("ParseSingleSequenceExample", err)
		return
	}
	if feature_list_sparse_shapes, idx, err = makeOutputList(op, idx, "feature_list_sparse_shapes"); err != nil {
		scope.UpdateErr("ParseSingleSequenceExample", err)
		return
	}
	if feature_list_dense_values, idx, err = makeOutputList(op, idx, "feature_list_dense_values"); err != nil {
		scope.UpdateErr("ParseSingleSequenceExample", err)
		return
	}
	return context_sparse_indices, context_sparse_values, context_sparse_shapes, context_dense_values, feature_list_sparse_indices, feature_list_sparse_values, feature_list_sparse_shapes, feature_list_dense_values
}

// SparseToDenseAttr is an optional argument to SparseToDense.
type SparseToDenseAttr func(optionalAttr)

// SparseToDenseValidateIndices sets the optional validate_indices attribute to value.
//
// value: If true, indices are checked to make sure they are sorted in
// lexicographic order and that there are no repeats.
// If not specified, defaults to true
func SparseToDenseValidateIndices(value bool) SparseToDenseAttr {
	return func(m optionalAttr) {
		m["validate_indices"] = value
	}
}

// Converts a sparse representation into a dense tensor.
//
// Builds an array `dense` with shape `output_shape` such that
//
// ```
// # If sparse_indices is scalar
// dense[i] = (i == sparse_indices ? sparse_values : default_value)
//
// # If sparse_indices is a vector, then for each i
// dense[sparse_indices[i]] = sparse_values[i]
//
// # If sparse_indices is an n by d matrix, then for each i in [0, n)
// dense[sparse_indices[i][0], ..., sparse_indices[i][d-1]] = sparse_values[i]
// ```
//
// All other values in `dense` are set to `default_value`.  If `sparse_values` is a
// scalar, all sparse indices are set to this single value.
//
// Indices should be sorted in lexicographic order, and indices must not
// contain any repeats. If `validate_indices` is true, these properties
// are checked during execution.
//
// Arguments:
//	sparse_indices: 0-D, 1-D, or 2-D.  `sparse_indices[i]` contains the complete
// index where `sparse_values[i]` will be placed.
//	output_shape: 1-D.  Shape of the dense output tensor.
//	sparse_values: 1-D.  Values corresponding to each row of `sparse_indices`,
// or a scalar value to be used for all sparse indices.
//	default_value: Scalar value to set for indices not specified in
// `sparse_indices`.
//
// Returns Dense output tensor of shape `output_shape`.
func SparseToDense(scope *Scope, sparse_indices tf.Output, output_shape tf.Output, sparse_values tf.Output, default_value tf.Output, optional ...SparseToDenseAttr) (dense tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "SparseToDense",
		Input: []tf.Input{
			sparse_indices, output_shape, sparse_values, default_value,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
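
// An editorial sketch (not generated code): scatter two values into a dense
// length-4 vector, with zeros elsewhere. NewScope, SubScope, and Const are
// assumed from elsewhere in this package.
func exampleSparseToDense() tf.Output {
	s := NewScope()
	sparseIndices := Const(s.SubScope("indices"), []int64{0, 2}) // 1-D form
	outputShape := Const(s.SubScope("shape"), []int64{4})
	sparseValues := Const(s.SubScope("values"), []float32{5, 7})
	defaultValue := Const(s.SubScope("default"), float32(0))
	return SparseToDense(s, sparseIndices, outputShape, sparseValues, defaultValue,
		SparseToDenseValidateIndices(true)) // => [5 0 7 0]
}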

// PreventGradientAttr is an optional argument to PreventGradient.
type PreventGradientAttr func(optionalAttr)

// PreventGradientMessage sets the optional message attribute to value.
//
// value: Will be printed in the error when anyone tries to differentiate
// this operation.
// If not specified, defaults to ""
func PreventGradientMessage(value string) PreventGradientAttr {
	return func(m optionalAttr) {
		m["message"] = value
	}
}

// An identity op that triggers an error if a gradient is requested.
//
// When executed in a graph, this op outputs its input tensor as-is.
//
// When building ops to compute gradients, the TensorFlow gradient system
// will return an error when trying to look up the gradient of this op,
// because no gradient must ever be registered for this function.  This
// op exists to prevent subtle bugs from silently returning unimplemented
// gradients in some corner cases.
//
// Arguments:
//	input: any tensor.
//
// Returns the same input tensor.
func PreventGradient(scope *Scope, input tf.Output, optional ...PreventGradientAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "PreventGradient",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Computes asin of x element-wise.
func Asin(scope *Scope, x tf.Output) (y tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Asin",
		Input: []tf.Input{
			x,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Computes the sum along sparse segments of a tensor.
//
// Like `SparseSegmentSum`, but allows missing ids in `segment_ids`. If an id is
// missing, the `output` tensor at that position will be zeroed.
//
// Read
// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/sparse#Segmentation)
// for an explanation of segments.
//
// For example:
//
// ```python
// c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])
//
// tf.sparse_segment_sum_with_num_segments(
//     c, tf.constant([0, 1]), tf.constant([0, 0]), num_segments=3)
// # => [[0 0 0 0]
// #     [0 0 0 0]
// #     [0 0 0 0]]
//
// tf.sparse_segment_sum_with_num_segments(c,
//                                         tf.constant([0, 1]),
//                                         tf.constant([0, 2]),
//                                         num_segments=4)
// # => [[ 1  2  3  4]
// #     [ 0  0  0  0]
// #     [-1 -2 -3 -4]
// #     [ 0  0  0  0]]
// ```
//
// Arguments:
//
//	indices: A 1-D tensor. Has same rank as `segment_ids`.
//	segment_ids: A 1-D tensor. Values should be sorted and can be repeated.
//	num_segments: Should equal the number of distinct segment IDs.
//
// Returns Has same shape as data, except for dimension 0 which
// has size `num_segments`.
func SparseSegmentSumWithNumSegments(scope *Scope, data tf.Output, indices tf.Output, segment_ids tf.Output, num_segments tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "SparseSegmentSumWithNumSegments",
		Input: []tf.Input{
			data, indices, segment_ids, num_segments,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
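
// An editorial sketch (not generated code) mirroring the first Python
// example above: rows 0 and 1 are summed into segment 0 (their sum is zero),
// and segments 1 and 2 stay zero-filled. NewScope, SubScope, and Const are
// assumed from elsewhere in this package.
func exampleSparseSegmentSumWithNumSegments() tf.Output {
	s := NewScope()
	c := Const(s.SubScope("c"), [][]int32{{1, 2, 3, 4}, {-1, -2, -3, -4}, {5, 6, 7, 8}})
	indices := Const(s.SubScope("indices"), []int32{0, 1})
	segmentIDs := Const(s.SubScope("segment_ids"), []int32{0, 0})
	numSegments := Const(s.SubScope("num_segments"), int32(3))
	return SparseSegmentSumWithNumSegments(s, c, indices, segmentIDs, numSegments)
}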

// SparseReduceMaxAttr is an optional argument to SparseReduceMax.
type SparseReduceMaxAttr func(optionalAttr)

// SparseReduceMaxKeepDims sets the optional keep_dims attribute to value.
//
// value: If true, retain reduced dimensions with length 1.
// If not specified, defaults to false
func SparseReduceMaxKeepDims(value bool) SparseReduceMaxAttr {
	return func(m optionalAttr) {
		m["keep_dims"] = value
	}
}

// Computes the max of elements across dimensions of a SparseTensor.
//
// This Op takes a SparseTensor and is the sparse counterpart to
// `tf.reduce_max()`.  In particular, this Op also returns a dense `Tensor`
// instead of a sparse one.
//
// Reduces `sp_input` along the dimensions given in `reduction_axes`.  Unless
// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
// `reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained
// with length 1.
//
// If `reduction_axes` has no entries, all dimensions are reduced, and a tensor
// with a single element is returned.  Additionally, the axes can be negative,
// which are interpreted according to the indexing rules in Python.
//
// Arguments:
//	input_indices: 2-D.  `N x R` matrix with the indices of non-empty values in a
// SparseTensor, possibly not in canonical ordering.
//	input_values: 1-D.  `N` non-empty values corresponding to `input_indices`.
//	input_shape: 1-D.  Shape of the input SparseTensor.
//	reduction_axes: 1-D.  Length-`K` vector containing the reduction axes.
//
// Returns `R-K`-D.  The reduced Tensor.
func SparseReduceMax(scope *Scope, input_indices tf.Output, input_values tf.Output, input_shape tf.Output, reduction_axes tf.Output, optional ...SparseReduceMaxAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "SparseReduceMax",
		Input: []tf.Input{
			input_indices, input_values, input_shape, reduction_axes,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
10576
10577// DecodeRawAttr is an optional argument to DecodeRaw.
10578type DecodeRawAttr func(optionalAttr)
10579
10580// DecodeRawLittleEndian sets the optional little_endian attribute to value.
10581//
10582// value: Whether the input `bytes` are in little-endian order.
10583// Ignored for `out_type` values that are stored in a single byte like
10584// `uint8`.
10585// If not specified, defaults to true
10586func DecodeRawLittleEndian(value bool) DecodeRawAttr {
10587	return func(m optionalAttr) {
10588		m["little_endian"] = value
10589	}
10590}
10591
10592// Reinterpret the bytes of a string as a vector of numbers.
10593//
10594// Arguments:
10595//	bytes: All the elements must have the same length.
10596//
10597//
10598// Returns A Tensor with one more dimension than the input `bytes`.  The
10599// added dimension will have size equal to the length of the elements
10600// of `bytes` divided by the number of bytes to represent `out_type`.
10601func DecodeRaw(scope *Scope, bytes tf.Output, out_type tf.DataType, optional ...DecodeRawAttr) (output tf.Output) {
10602	if scope.Err() != nil {
10603		return
10604	}
10605	attrs := map[string]interface{}{"out_type": out_type}
10606	for _, a := range optional {
10607		a(attrs)
10608	}
10609	opspec := tf.OpSpec{
10610		Type: "DecodeRaw",
10611		Input: []tf.Input{
10612			bytes,
10613		},
10614		Attrs: attrs,
10615	}
10616	op := scope.AddOperation(opspec)
10617	return op.Output(0)
10618}
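
// Editor's note: a usage sketch, not generated code. It decodes little-endian
// float32 bytes, assuming the NewScope and Const helpers from this package.
//
// ```go
// s := NewScope()
// // "\x00\x00\x80\x3f" and "\x00\x00\x00\x40" are float32(1) and float32(2)
// // in little-endian byte order.
// raw := Const(s, []string{"\x00\x00\x80\x3f", "\x00\x00\x00\x40"})
// // Each 4-byte element yields one float32, so the output shape is [2, 1].
// vals := DecodeRaw(s, raw, tf.Float, DecodeRawLittleEndian(true))
// _ = vals
// ```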
10619
10620// RetrieveTPUEmbeddingADAMParametersAttr is an optional argument to RetrieveTPUEmbeddingADAMParameters.
10621type RetrieveTPUEmbeddingADAMParametersAttr func(optionalAttr)
10622
10623// RetrieveTPUEmbeddingADAMParametersTableId sets the optional table_id attribute to value.
10624// If not specified, defaults to -1
10625//
10626// REQUIRES: value >= -1
10627func RetrieveTPUEmbeddingADAMParametersTableId(value int64) RetrieveTPUEmbeddingADAMParametersAttr {
10628	return func(m optionalAttr) {
10629		m["table_id"] = value
10630	}
10631}
10632
10633// RetrieveTPUEmbeddingADAMParametersTableName sets the optional table_name attribute to value.
10634// If not specified, defaults to ""
10635func RetrieveTPUEmbeddingADAMParametersTableName(value string) RetrieveTPUEmbeddingADAMParametersAttr {
10636	return func(m optionalAttr) {
10637		m["table_name"] = value
10638	}
10639}
10640
10641// Retrieve ADAM embedding parameters.
10642//
// An op that retrieves optimization parameters from the embedding tables to host
// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
10645// the correct embedding table configuration. For example, this op is
10646// used to retrieve updated parameters before saving a checkpoint.
10647//
// Returns:
//	parameters: Parameter parameters updated by the ADAM optimization algorithm.
//	momenta: Parameter momenta updated by the ADAM optimization algorithm.
//	velocities: Parameter velocities updated by the ADAM optimization algorithm.
10649func RetrieveTPUEmbeddingADAMParameters(scope *Scope, num_shards int64, shard_id int64, optional ...RetrieveTPUEmbeddingADAMParametersAttr) (parameters tf.Output, momenta tf.Output, velocities tf.Output) {
10650	if scope.Err() != nil {
10651		return
10652	}
10653	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
10654	for _, a := range optional {
10655		a(attrs)
10656	}
10657	opspec := tf.OpSpec{
10658		Type: "RetrieveTPUEmbeddingADAMParameters",
10659
10660		Attrs: attrs,
10661	}
10662	op := scope.AddOperation(opspec)
10663	return op.Output(0), op.Output(1), op.Output(2)
10664}
10665
10666// FusedBatchNormAttr is an optional argument to FusedBatchNorm.
10667type FusedBatchNormAttr func(optionalAttr)
10668
10669// FusedBatchNormEpsilon sets the optional epsilon attribute to value.
10670//
10671// value: A small float number added to the variance of x.
10672// If not specified, defaults to 0.0001
10673func FusedBatchNormEpsilon(value float32) FusedBatchNormAttr {
10674	return func(m optionalAttr) {
10675		m["epsilon"] = value
10676	}
10677}
10678
10679// FusedBatchNormDataFormat sets the optional data_format attribute to value.
10680//
10681// value: The data format for x and y. Either "NHWC" (default) or "NCHW".
10682// If not specified, defaults to "NHWC"
10683func FusedBatchNormDataFormat(value string) FusedBatchNormAttr {
10684	return func(m optionalAttr) {
10685		m["data_format"] = value
10686	}
10687}
10688
10689// FusedBatchNormIsTraining sets the optional is_training attribute to value.
10690//
10691// value: A bool value to indicate the operation is for training (default)
10692// or inference.
10693// If not specified, defaults to true
10694func FusedBatchNormIsTraining(value bool) FusedBatchNormAttr {
10695	return func(m optionalAttr) {
10696		m["is_training"] = value
10697	}
10698}
10699
10700// Batch normalization.
10701//
// Note that the layout of the 4D Tensors is defined by either "NHWC" or "NCHW".
// The size of the 1D Tensors matches the dimension C of the 4D Tensors.
10704//
10705// Arguments:
10706//	x: A 4D Tensor for input data.
10707//	scale: A 1D Tensor for scaling factor, to scale the normalized x.
10708//	offset: A 1D Tensor for offset, to shift to the normalized x.
10709//	mean: A 1D Tensor for population mean. Used for inference only;
10710// must be empty for training.
10711//	variance: A 1D Tensor for population variance. Used for inference only;
10712// must be empty for training.
10713//
// Returns:
//	y: A 4D Tensor for output data.
//	batch_mean: A 1D Tensor for the computed batch mean, to be used by
// TensorFlow to compute the running mean.
//	batch_variance: A 1D Tensor for the computed batch variance, to be used by
// TensorFlow to compute the running variance.
//	reserve_space_1: A 1D Tensor for the computed batch mean, to be reused
// in the gradient computation.
//	reserve_space_2: A 1D Tensor for the computed batch variance (inverted
// variance in the cuDNN case), to be reused in the gradient computation.
10719func FusedBatchNorm(scope *Scope, x tf.Output, scale tf.Output, offset tf.Output, mean tf.Output, variance tf.Output, optional ...FusedBatchNormAttr) (y tf.Output, batch_mean tf.Output, batch_variance tf.Output, reserve_space_1 tf.Output, reserve_space_2 tf.Output) {
10720	if scope.Err() != nil {
10721		return
10722	}
10723	attrs := map[string]interface{}{}
10724	for _, a := range optional {
10725		a(attrs)
10726	}
10727	opspec := tf.OpSpec{
10728		Type: "FusedBatchNorm",
10729		Input: []tf.Input{
10730			x, scale, offset, mean, variance,
10731		},
10732		Attrs: attrs,
10733	}
10734	op := scope.AddOperation(opspec)
10735	return op.Output(0), op.Output(1), op.Output(2), op.Output(3), op.Output(4)
10736}
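
// Editor's note: a sketch of an inference-mode call, not generated code; it
// assumes the Placeholder, NewScope and Const helpers defined elsewhere in this
// package, and a single-channel input.
//
// ```go
// s := NewScope()
// x := Placeholder(s, tf.Float) // fed with a [batch, height, width, 1] tensor
// scale := Const(s, []float32{1})
// offset := Const(s, []float32{0})
// mean := Const(s, []float32{0})     // population mean; inference only
// variance := Const(s, []float32{1}) // population variance; inference only
// y, _, _, _, _ := FusedBatchNorm(s, x, scale, offset, mean, variance,
// 	FusedBatchNormIsTraining(false), FusedBatchNormEpsilon(1e-3))
// _ = y
// ```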
10737
10738// Creates a dataset that shards the input dataset.
10739//
10740// Creates a dataset that shards the input dataset by num_workers, returning a
10741// sharded dataset for the index-th worker. This attempts to automatically shard
10742// a dataset by examining the Dataset graph and inserting a shard op before the
10743// inputs to a reader Dataset (e.g. CSVDataset, TFRecordDataset).
10744//
10745// This dataset will throw a NotFound error if we cannot shard the dataset
10746// automatically.
10747//
10748// Arguments:
10749//	input_dataset: A variant tensor representing the input dataset.
10750//	num_workers: A scalar representing the number of workers to distribute this dataset across.
10751//	index: A scalar representing the index of the current worker out of num_workers.
10752//
10753//
10754func ExperimentalAutoShardDataset(scope *Scope, input_dataset tf.Output, num_workers tf.Output, index tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
10755	if scope.Err() != nil {
10756		return
10757	}
10758	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
10759	opspec := tf.OpSpec{
10760		Type: "ExperimentalAutoShardDataset",
10761		Input: []tf.Input{
10762			input_dataset, num_workers, index,
10763		},
10764		Attrs: attrs,
10765	}
10766	op := scope.AddOperation(opspec)
10767	return op.Output(0)
10768}
10769
10770// RandomStandardNormalAttr is an optional argument to RandomStandardNormal.
10771type RandomStandardNormalAttr func(optionalAttr)
10772
10773// RandomStandardNormalSeed sets the optional seed attribute to value.
10774//
10775// value: If either `seed` or `seed2` are set to be non-zero, the random number
10776// generator is seeded by the given seed.  Otherwise, it is seeded by a
10777// random seed.
10778// If not specified, defaults to 0
10779func RandomStandardNormalSeed(value int64) RandomStandardNormalAttr {
10780	return func(m optionalAttr) {
10781		m["seed"] = value
10782	}
10783}
10784
10785// RandomStandardNormalSeed2 sets the optional seed2 attribute to value.
10786//
10787// value: A second seed to avoid seed collision.
10788// If not specified, defaults to 0
10789func RandomStandardNormalSeed2(value int64) RandomStandardNormalAttr {
10790	return func(m optionalAttr) {
10791		m["seed2"] = value
10792	}
10793}
10794
10795// Outputs random values from a normal distribution.
10796//
10797// The generated values will have mean 0 and standard deviation 1.
10798//
10799// Arguments:
10800//	shape: The shape of the output tensor.
10801//	dtype: The type of the output.
10802//
10803// Returns A tensor of the specified shape filled with random normal values.
10804func RandomStandardNormal(scope *Scope, shape tf.Output, dtype tf.DataType, optional ...RandomStandardNormalAttr) (output tf.Output) {
10805	if scope.Err() != nil {
10806		return
10807	}
10808	attrs := map[string]interface{}{"dtype": dtype}
10809	for _, a := range optional {
10810		a(attrs)
10811	}
10812	opspec := tf.OpSpec{
10813		Type: "RandomStandardNormal",
10814		Input: []tf.Input{
10815			shape,
10816		},
10817		Attrs: attrs,
10818	}
10819	op := scope.AddOperation(opspec)
10820	return op.Output(0)
10821}
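
// Editor's note: a sketch, not generated code, showing the functional-options
// pattern these wrappers use; pinning both seeds makes the op deterministic
// across runs. NewScope and Const are assumed from this package.
//
// ```go
// s := NewScope()
// shape := Const(s, []int32{2, 3})
// noise := RandomStandardNormal(s, shape, tf.Float,
// 	RandomStandardNormalSeed(42), RandomStandardNormalSeed2(7))
// _ = noise
// ```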
10822
10823// FusedResizeAndPadConv2DAttr is an optional argument to FusedResizeAndPadConv2D.
10824type FusedResizeAndPadConv2DAttr func(optionalAttr)
10825
10826// FusedResizeAndPadConv2DResizeAlignCorners sets the optional resize_align_corners attribute to value.
10827//
10828// value: If true, the centers of the 4 corner pixels of the input and output tensors are
10829// aligned, preserving the values at the corner pixels. Defaults to false.
10830// If not specified, defaults to false
10831func FusedResizeAndPadConv2DResizeAlignCorners(value bool) FusedResizeAndPadConv2DAttr {
10832	return func(m optionalAttr) {
10833		m["resize_align_corners"] = value
10834	}
10835}
10836
10837// Performs a resize and padding as a preprocess during a convolution.
10838//
10839// It's often possible to do spatial transformations more efficiently as part of
10840// the packing stage of a convolution, so this op allows for an optimized
10841// implementation where these stages are fused together. This prevents the need to
10842// write out the intermediate results as whole tensors, reducing memory pressure,
10843// and we can get some latency gains by merging the transformation calculations.
10844// The data_format attribute for Conv2D isn't supported by this op, and defaults to
10845// 'NHWC' order.
10846// Internally this op uses a single per-graph scratch buffer, which means that it
10847// will block if multiple versions are being run in parallel. This is because this
10848// operator is primarily an optimization to minimize memory usage.
10849//
10850// Arguments:
10851//	input: 4-D with shape `[batch, in_height, in_width, in_channels]`.
10852//	size: A 1-D int32 Tensor of 2 elements: `new_height, new_width`.  The
10853// new size for the images.
10854//	paddings: A two-column matrix specifying the padding sizes. The number of
10855// rows must be the same as the rank of `input`.
10856//	filter: 4-D with shape
10857// `[filter_height, filter_width, in_channels, out_channels]`.
10858//
10859//	strides: 1-D of length 4.  The stride of the sliding window for each dimension
10860// of `input`. Must be in the same order as the dimension specified with format.
10861//	padding: The type of padding algorithm to use.
10862func FusedResizeAndPadConv2D(scope *Scope, input tf.Output, size tf.Output, paddings tf.Output, filter tf.Output, mode string, strides []int64, padding string, optional ...FusedResizeAndPadConv2DAttr) (output tf.Output) {
10863	if scope.Err() != nil {
10864		return
10865	}
10866	attrs := map[string]interface{}{"mode": mode, "strides": strides, "padding": padding}
10867	for _, a := range optional {
10868		a(attrs)
10869	}
10870	opspec := tf.OpSpec{
10871		Type: "FusedResizeAndPadConv2D",
10872		Input: []tf.Input{
10873			input, size, paddings, filter,
10874		},
10875		Attrs: attrs,
10876	}
10877	op := scope.AddOperation(opspec)
10878	return op.Output(0)
10879}
10880
10881// RandomUniformAttr is an optional argument to RandomUniform.
10882type RandomUniformAttr func(optionalAttr)
10883
10884// RandomUniformSeed sets the optional seed attribute to value.
10885//
10886// value: If either `seed` or `seed2` are set to be non-zero, the random number
10887// generator is seeded by the given seed.  Otherwise, it is seeded by a
10888// random seed.
10889// If not specified, defaults to 0
10890func RandomUniformSeed(value int64) RandomUniformAttr {
10891	return func(m optionalAttr) {
10892		m["seed"] = value
10893	}
10894}
10895
10896// RandomUniformSeed2 sets the optional seed2 attribute to value.
10897//
10898// value: A second seed to avoid seed collision.
10899// If not specified, defaults to 0
10900func RandomUniformSeed2(value int64) RandomUniformAttr {
10901	return func(m optionalAttr) {
10902		m["seed2"] = value
10903	}
10904}
10905
10906// Outputs random values from a uniform distribution.
10907//
10908// The generated values follow a uniform distribution in the range `[0, 1)`. The
10909// lower bound 0 is included in the range, while the upper bound 1 is excluded.
10910//
10911// Arguments:
10912//	shape: The shape of the output tensor.
10913//	dtype: The type of the output.
10914//
10915// Returns A tensor of the specified shape filled with uniform random values.
10916func RandomUniform(scope *Scope, shape tf.Output, dtype tf.DataType, optional ...RandomUniformAttr) (output tf.Output) {
10917	if scope.Err() != nil {
10918		return
10919	}
10920	attrs := map[string]interface{}{"dtype": dtype}
10921	for _, a := range optional {
10922		a(attrs)
10923	}
10924	opspec := tf.OpSpec{
10925		Type: "RandomUniform",
10926		Input: []tf.Input{
10927			shape,
10928		},
10929		Attrs: attrs,
10930	}
10931	op := scope.AddOperation(opspec)
10932	return op.Output(0)
10933}
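
// Editor's note: a sketch, not generated code, mapping the [0, 1) output onto an
// arbitrary half-open interval with an affine transform; Add, Mul, Sub, NewScope
// and Const are assumed to be the wrappers defined elsewhere in this package.
//
// ```go
// s := NewScope()
// u := RandomUniform(s, Const(s, []int32{4}), tf.Float)
// lo, hi := Const(s, float32(-5)), Const(s, float32(5))
// // u*(hi-lo) + lo is uniform on [-5, 5).
// scaled := Add(s, Mul(s, u, Sub(s, hi, lo)), lo)
// _ = scaled
// ```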
10934
10935// ResourceApplyFtrlAttr is an optional argument to ResourceApplyFtrl.
10936type ResourceApplyFtrlAttr func(optionalAttr)
10937
10938// ResourceApplyFtrlUseLocking sets the optional use_locking attribute to value.
10939//
10940// value: If `True`, updating of the var and accum tensors will be protected
10941// by a lock; otherwise the behavior is undefined, but may exhibit less
10942// contention.
10943// If not specified, defaults to false
10944func ResourceApplyFtrlUseLocking(value bool) ResourceApplyFtrlAttr {
10945	return func(m optionalAttr) {
10946		m["use_locking"] = value
10947	}
10948}
10949
10950// Update '*var' according to the Ftrl-proximal scheme.
10951//
10952// accum_new = accum + grad * grad
10953// linear += grad - (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
10954// quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
10955// var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
10956// accum = accum_new
10957//
10958// Arguments:
10959//	var_: Should be from a Variable().
10960//	accum: Should be from a Variable().
10961//	linear: Should be from a Variable().
10962//	grad: The gradient.
10963//	lr: Scaling factor. Must be a scalar.
//	l1: L1 regularization. Must be a scalar.
//	l2: L2 regularization. Must be a scalar.
10966//	lr_power: Scaling factor. Must be a scalar.
10967//
10968// Returns the created operation.
10969func ResourceApplyFtrl(scope *Scope, var_ tf.Output, accum tf.Output, linear tf.Output, grad tf.Output, lr tf.Output, l1 tf.Output, l2 tf.Output, lr_power tf.Output, optional ...ResourceApplyFtrlAttr) (o *tf.Operation) {
10970	if scope.Err() != nil {
10971		return
10972	}
10973	attrs := map[string]interface{}{}
10974	for _, a := range optional {
10975		a(attrs)
10976	}
10977	opspec := tf.OpSpec{
10978		Type: "ResourceApplyFtrl",
10979		Input: []tf.Input{
10980			var_, accum, linear, grad, lr, l1, l2, lr_power,
10981		},
10982		Attrs: attrs,
10983	}
10984	return scope.AddOperation(opspec)
10985}
10986
10987// Transforms a vector of brain.Example protos (as strings) into typed tensors.
10988//
10989// Arguments:
10990//	serialized: A vector containing a batch of binary serialized Example protos.
10991//	names: A vector containing the names of the serialized protos.
10992// May contain, for example, table key (descriptive) names for the
10993// corresponding serialized protos.  These are purely useful for debugging
10994// purposes, and the presence of values here has no effect on the output.
10995// May also be an empty vector if no names are available.
10996// If non-empty, this vector must be the same length as "serialized".
10997//	sparse_keys: A list of Nsparse string Tensors (scalars).
10998// The keys expected in the Examples' features associated with sparse values.
10999//	dense_keys: A list of Ndense string Tensors (scalars).
11000// The keys expected in the Examples' features associated with dense values.
11001//	dense_defaults: A list of Ndense Tensors (some may be empty).
11002// dense_defaults[j] provides default values
11003// when the example's feature_map lacks dense_key[j].  If an empty Tensor is
11004// provided for dense_defaults[j], then the Feature dense_keys[j] is required.
11005// The input type is inferred from dense_defaults[j], even when it's empty.
11006// If dense_defaults[j] is not empty, and dense_shapes[j] is fully defined,
11007// then the shape of dense_defaults[j] must match that of dense_shapes[j].
11008// If dense_shapes[j] has an undefined major dimension (variable strides dense
11009// feature), dense_defaults[j] must contain a single element:
11010// the padding element.
11011//	sparse_types: A list of Nsparse types; the data types of data in each Feature
11012// given in sparse_keys.
// Currently ParseExample supports DT_FLOAT (FloatList),
11014// DT_INT64 (Int64List), and DT_STRING (BytesList).
11015//	dense_shapes: A list of Ndense shapes; the shapes of data in each Feature
11016// given in dense_keys.
11017// The number of elements in the Feature corresponding to dense_key[j]
11018// must always equal dense_shapes[j].NumEntries().
11019// If dense_shapes[j] == (D0, D1, ..., DN) then the shape of output
11020// Tensor dense_values[j] will be (|serialized|, D0, D1, ..., DN):
11021// The dense outputs are just the inputs row-stacked by batch.
11022// This works for dense_shapes[j] = (-1, D1, ..., DN).  In this case
11023// the shape of the output Tensor dense_values[j] will be
11024// (|serialized|, M, D1, .., DN), where M is the maximum number of blocks
11025// of elements of length D1 * .... * DN, across all minibatch entries
11026// in the input.  Any minibatch entry with less than M blocks of elements of
11027// length D1 * ... * DN will be padded with the corresponding default_value
11028// scalar element along the second dimension.
11029func ParseExample(scope *Scope, serialized tf.Output, names tf.Output, sparse_keys []tf.Output, dense_keys []tf.Output, dense_defaults []tf.Output, sparse_types []tf.DataType, dense_shapes []tf.Shape) (sparse_indices []tf.Output, sparse_values []tf.Output, sparse_shapes []tf.Output, dense_values []tf.Output) {
11030	if scope.Err() != nil {
11031		return
11032	}
11033	attrs := map[string]interface{}{"sparse_types": sparse_types, "dense_shapes": dense_shapes}
11034	opspec := tf.OpSpec{
11035		Type: "ParseExample",
11036		Input: []tf.Input{
11037			serialized, names, tf.OutputList(sparse_keys), tf.OutputList(dense_keys), tf.OutputList(dense_defaults),
11038		},
11039		Attrs: attrs,
11040	}
11041	op := scope.AddOperation(opspec)
11042	if scope.Err() != nil {
11043		return
11044	}
11045	var idx int
11046	var err error
11047	if sparse_indices, idx, err = makeOutputList(op, idx, "sparse_indices"); err != nil {
11048		scope.UpdateErr("ParseExample", err)
11049		return
11050	}
11051	if sparse_values, idx, err = makeOutputList(op, idx, "sparse_values"); err != nil {
11052		scope.UpdateErr("ParseExample", err)
11053		return
11054	}
11055	if sparse_shapes, idx, err = makeOutputList(op, idx, "sparse_shapes"); err != nil {
11056		scope.UpdateErr("ParseExample", err)
11057		return
11058	}
11059	if dense_values, idx, err = makeOutputList(op, idx, "dense_values"); err != nil {
11060		scope.UpdateErr("ParseExample", err)
11061		return
11062	}
11063	return sparse_indices, sparse_values, sparse_shapes, dense_values
11064}
11065
11066// Compute the pairwise cross product.
11067//
11068// `a` and `b` must be the same shape; they can either be simple 3-element vectors,
11069// or any shape where the innermost dimension is 3. In the latter case, each pair
11070// of corresponding 3-element vectors is cross-multiplied independently.
11071//
11072// Arguments:
11073//	a: A tensor containing 3-element vectors.
11074//	b: Another tensor, of same type and shape as `a`.
11075//
11076// Returns Pairwise cross product of the vectors in `a` and `b`.
11077func Cross(scope *Scope, a tf.Output, b tf.Output) (product tf.Output) {
11078	if scope.Err() != nil {
11079		return
11080	}
11081	opspec := tf.OpSpec{
11082		Type: "Cross",
11083		Input: []tf.Input{
11084			a, b,
11085		},
11086	}
11087	op := scope.AddOperation(opspec)
11088	return op.Output(0)
11089}
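
// Editor's note: a sketch, not generated code; the cross product of the x and y
// unit vectors is the z unit vector. NewScope and Const are assumed from this
// package.
//
// ```go
// s := NewScope()
// a := Const(s, []float32{1, 0, 0})
// b := Const(s, []float32{0, 1, 0})
// z := Cross(s, a, b) // evaluates to [0, 0, 1]
// _ = z
// ```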
11090
11091// LeakyReluAttr is an optional argument to LeakyRelu.
11092type LeakyReluAttr func(optionalAttr)
11093
11094// LeakyReluAlpha sets the optional alpha attribute to value.
11095// If not specified, defaults to 0.2
11096func LeakyReluAlpha(value float32) LeakyReluAttr {
11097	return func(m optionalAttr) {
11098		m["alpha"] = value
11099	}
11100}
11101
11102// Computes rectified linear: `max(features, features * alpha)`.
11103func LeakyRelu(scope *Scope, features tf.Output, optional ...LeakyReluAttr) (activations tf.Output) {
11104	if scope.Err() != nil {
11105		return
11106	}
11107	attrs := map[string]interface{}{}
11108	for _, a := range optional {
11109		a(attrs)
11110	}
11111	opspec := tf.OpSpec{
11112		Type: "LeakyRelu",
11113		Input: []tf.Input{
11114			features,
11115		},
11116		Attrs: attrs,
11117	}
11118	op := scope.AddOperation(opspec)
11119	return op.Output(0)
11120}
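
// Editor's note: a sketch, not generated code, using a custom slope; NewScope
// and Const are assumed from this package.
//
// ```go
// s := NewScope()
// x := Const(s, []float32{-2, 0, 3})
// // max(x, 0.1*x) evaluates to [-0.2, 0, 3].
// y := LeakyRelu(s, x, LeakyReluAlpha(0.1))
// _ = y
// ```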
11121
11122// Outputs random integers from a uniform distribution.
11123//
11124// The generated values are uniform integers in the range `[minval, maxval)`.
11125// The lower bound `minval` is included in the range, while the upper bound
11126// `maxval` is excluded.
11127//
11128// The random integers are slightly biased unless `maxval - minval` is an exact
11129// power of two.  The bias is small for values of `maxval - minval` significantly
11130// smaller than the range of the output (either `2^32` or `2^64`).
11131//
11132// Arguments:
11133//	resource: The handle of the resource variable that stores the state of the RNG.
11134//	algorithm: The RNG algorithm.
11135//	shape: The shape of the output tensor.
11136//	minval: Minimum value (inclusive, scalar).
11137//	maxval: Maximum value (exclusive, scalar).
11138//
11139// Returns Random values with specified shape.
11140func StatefulUniformInt(scope *Scope, resource tf.Output, algorithm tf.Output, shape tf.Output, minval tf.Output, maxval tf.Output) (output tf.Output) {
11141	if scope.Err() != nil {
11142		return
11143	}
11144	opspec := tf.OpSpec{
11145		Type: "StatefulUniformInt",
11146		Input: []tf.Input{
11147			resource, algorithm, shape, minval, maxval,
11148		},
11149	}
11150	op := scope.AddOperation(opspec)
11151	return op.Output(0)
11152}
11153
11154// DecodeAndCropJpegAttr is an optional argument to DecodeAndCropJpeg.
11155type DecodeAndCropJpegAttr func(optionalAttr)
11156
11157// DecodeAndCropJpegChannels sets the optional channels attribute to value.
11158//
11159// value: Number of color channels for the decoded image.
11160// If not specified, defaults to 0
11161func DecodeAndCropJpegChannels(value int64) DecodeAndCropJpegAttr {
11162	return func(m optionalAttr) {
11163		m["channels"] = value
11164	}
11165}
11166
11167// DecodeAndCropJpegRatio sets the optional ratio attribute to value.
11168//
11169// value: Downscaling ratio.
11170// If not specified, defaults to 1
11171func DecodeAndCropJpegRatio(value int64) DecodeAndCropJpegAttr {
11172	return func(m optionalAttr) {
11173		m["ratio"] = value
11174	}
11175}
11176
11177// DecodeAndCropJpegFancyUpscaling sets the optional fancy_upscaling attribute to value.
11178//
11179// value: If true use a slower but nicer upscaling of the
11180// chroma planes (yuv420/422 only).
11181// If not specified, defaults to true
11182func DecodeAndCropJpegFancyUpscaling(value bool) DecodeAndCropJpegAttr {
11183	return func(m optionalAttr) {
11184		m["fancy_upscaling"] = value
11185	}
11186}
11187
11188// DecodeAndCropJpegTryRecoverTruncated sets the optional try_recover_truncated attribute to value.
11189//
11190// value: If true try to recover an image from truncated input.
11191// If not specified, defaults to false
11192func DecodeAndCropJpegTryRecoverTruncated(value bool) DecodeAndCropJpegAttr {
11193	return func(m optionalAttr) {
11194		m["try_recover_truncated"] = value
11195	}
11196}
11197
11198// DecodeAndCropJpegAcceptableFraction sets the optional acceptable_fraction attribute to value.
11199//
11200// value: The minimum required fraction of lines before a truncated
11201// input is accepted.
11202// If not specified, defaults to 1
11203func DecodeAndCropJpegAcceptableFraction(value float32) DecodeAndCropJpegAttr {
11204	return func(m optionalAttr) {
11205		m["acceptable_fraction"] = value
11206	}
11207}
11208
11209// DecodeAndCropJpegDctMethod sets the optional dct_method attribute to value.
11210//
11211// value: string specifying a hint about the algorithm used for
11212// decompression.  Defaults to "" which maps to a system-specific
11213// default.  Currently valid values are ["INTEGER_FAST",
11214// "INTEGER_ACCURATE"].  The hint may be ignored (e.g., the internal
11215// jpeg library changes to a version that does not have that specific
11216// option.)
11217// If not specified, defaults to ""
11218func DecodeAndCropJpegDctMethod(value string) DecodeAndCropJpegAttr {
11219	return func(m optionalAttr) {
11220		m["dct_method"] = value
11221	}
11222}
11223
// Decode and crop a JPEG-encoded image to a uint8 tensor.
11225//
11226// The attr `channels` indicates the desired number of color channels for the
11227// decoded image.
11228//
11229// Accepted values are:
11230//
11231// *   0: Use the number of channels in the JPEG-encoded image.
11232// *   1: output a grayscale image.
11233// *   3: output an RGB image.
11234//
11235// If needed, the JPEG-encoded image is transformed to match the requested number
11236// of color channels.
11237//
11238// The attr `ratio` allows downscaling the image by an integer factor during
11239// decoding.  Allowed values are: 1, 2, 4, and 8.  This is much faster than
11240// downscaling the image later.
11241//
11242//
// This op is equivalent to decoding and then cropping, but is much faster because
// it only decodes the part of the JPEG image within the crop window.
11245//
11246// Arguments:
11247//	contents: 0-D.  The JPEG-encoded image.
11248//	crop_window: 1-D.  The crop window: [crop_y, crop_x, crop_height, crop_width].
11249//
// Returns 3-D with shape `[height, width, channels]`.
11251func DecodeAndCropJpeg(scope *Scope, contents tf.Output, crop_window tf.Output, optional ...DecodeAndCropJpegAttr) (image tf.Output) {
11252	if scope.Err() != nil {
11253		return
11254	}
11255	attrs := map[string]interface{}{}
11256	for _, a := range optional {
11257		a(attrs)
11258	}
11259	opspec := tf.OpSpec{
11260		Type: "DecodeAndCropJpeg",
11261		Input: []tf.Input{
11262			contents, crop_window,
11263		},
11264		Attrs: attrs,
11265	}
11266	op := scope.AddOperation(opspec)
11267	return op.Output(0)
11268}
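
// Editor's note: a sketch, not generated code, decoding only a crop window of a
// JPEG file; the file name is hypothetical, and ReadFile, NewScope and Const are
// assumed to be the wrappers defined elsewhere in this package.
//
// ```go
// s := NewScope()
// contents := ReadFile(s, Const(s, "input.jpg")) // hypothetical path
// // Crop a 100x200 window whose top-left corner is at (y=10, x=20).
// window := Const(s, []int32{10, 20, 100, 200})
// img := DecodeAndCropJpeg(s, contents, window, DecodeAndCropJpegChannels(3))
// _ = img
// ```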
11269
11270// StatefulStandardNormalV2Attr is an optional argument to StatefulStandardNormalV2.
11271type StatefulStandardNormalV2Attr func(optionalAttr)
11272
11273// StatefulStandardNormalV2Dtype sets the optional dtype attribute to value.
11274//
11275// value: The type of the output.
11276// If not specified, defaults to DT_FLOAT
11277func StatefulStandardNormalV2Dtype(value tf.DataType) StatefulStandardNormalV2Attr {
11278	return func(m optionalAttr) {
11279		m["dtype"] = value
11280	}
11281}
11282
11283// Outputs random values from a normal distribution.
11284//
11285// The generated values will have mean 0 and standard deviation 1.
11286//
11287// Arguments:
11288//	resource: The handle of the resource variable that stores the state of the RNG.
11289//	algorithm: The RNG algorithm.
11290//	shape: The shape of the output tensor.
11291//
11292// Returns A tensor of the specified shape filled with random normal values.
11293func StatefulStandardNormalV2(scope *Scope, resource tf.Output, algorithm tf.Output, shape tf.Output, optional ...StatefulStandardNormalV2Attr) (output tf.Output) {
11294	if scope.Err() != nil {
11295		return
11296	}
11297	attrs := map[string]interface{}{}
11298	for _, a := range optional {
11299		a(attrs)
11300	}
11301	opspec := tf.OpSpec{
11302		Type: "StatefulStandardNormalV2",
11303		Input: []tf.Input{
11304			resource, algorithm, shape,
11305		},
11306		Attrs: attrs,
11307	}
11308	op := scope.AddOperation(opspec)
11309	return op.Output(0)
11310}
11311
11312// StatefulUniformFullIntAttr is an optional argument to StatefulUniformFullInt.
11313type StatefulUniformFullIntAttr func(optionalAttr)
11314
11315// StatefulUniformFullIntDtype sets the optional dtype attribute to value.
11316//
11317// value: The type of the output.
11318// If not specified, defaults to DT_UINT64
11319func StatefulUniformFullIntDtype(value tf.DataType) StatefulUniformFullIntAttr {
11320	return func(m optionalAttr) {
11321		m["dtype"] = value
11322	}
11323}
11324
11325// Outputs random integers from a uniform distribution.
11326//
11327// The generated values are uniform integers covering the whole range of `dtype`.
11328//
11329// Arguments:
11330//	resource: The handle of the resource variable that stores the state of the RNG.
11331//	algorithm: The RNG algorithm.
11332//	shape: The shape of the output tensor.
11333//
11334// Returns Random values with specified shape.
11335func StatefulUniformFullInt(scope *Scope, resource tf.Output, algorithm tf.Output, shape tf.Output, optional ...StatefulUniformFullIntAttr) (output tf.Output) {
11336	if scope.Err() != nil {
11337		return
11338	}
11339	attrs := map[string]interface{}{}
11340	for _, a := range optional {
11341		a(attrs)
11342	}
11343	opspec := tf.OpSpec{
11344		Type: "StatefulUniformFullInt",
11345		Input: []tf.Input{
11346			resource, algorithm, shape,
11347		},
11348		Attrs: attrs,
11349	}
11350	op := scope.AddOperation(opspec)
11351	return op.Output(0)
11352}
11353
// Locks a mutex resource.  The output is the lock.
//
// So long as the lock tensor is alive, any other request to use `MutexLock`
// with this mutex will wait.
11357//
11358// This is particularly useful for creating a critical section when used in
11359// conjunction with `MutexLockIdentity`:
11360//
11361// ```python
11362//
11363// mutex = mutex_v2(
11364//   shared_name=handle_name, container=container, name=name)
11365//
11366// def execute_in_critical_section(fn, *args, **kwargs):
11367//   lock = gen_resource_variable_ops.mutex_lock(mutex)
11368//
11369//   with ops.control_dependencies([lock]):
11370//     r = fn(*args, **kwargs)
11371//
11372//   with ops.control_dependencies(nest.flatten(r)):
11373//     with ops.colocate_with(mutex):
11374//       ensure_lock_exists = mutex_lock_identity(lock)
11375//
11376//     # Make sure that if any element of r is accessed, all of
11377//     # them are executed together.
11378//     r = nest.map_structure(tf.identity, r)
11379//
11380//   with ops.control_dependencies([ensure_lock_exists]):
11381//     return nest.map_structure(tf.identity, r)
11382// ```
11383//
11384// While `fn` is running in the critical section, no other functions which wish to
11385// use this critical section may run.
11386//
11387// Often the use case is that two executions of the same graph, in parallel,
11388// wish to run `fn`; and we wish to ensure that only one of them executes
// at a time.  This is especially important if `fn` modifies one or more
// variables.
11391//
11392// It is also useful if two separate functions must share a resource, but we
11393// wish to ensure the usage is exclusive.
11394//
11395// Arguments:
11396//	mutex: The mutex resource to lock.
11397//
11398// Returns A tensor that keeps a shared pointer to a lock on the mutex;
11399// when the Tensor is destroyed, the use count on the shared pointer is decreased
11400// by 1.  When it reaches 0, the lock is released.
11401func MutexLock(scope *Scope, mutex tf.Output) (mutex_lock tf.Output) {
11402	if scope.Err() != nil {
11403		return
11404	}
11405	opspec := tf.OpSpec{
11406		Type: "MutexLock",
11407		Input: []tf.Input{
11408			mutex,
11409		},
11410	}
11411	op := scope.AddOperation(opspec)
11412	return op.Output(0)
11413}
11414
11415// Transforms a serialized tensorflow.TensorProto proto into a Tensor.
11416//
11417// Arguments:
11418//	serialized: A scalar string containing a serialized TensorProto proto.
11419//	out_type: The type of the serialized tensor.  The provided type must match the
11420// type of the serialized tensor and no implicit conversion will take place.
11421//
11422// Returns A Tensor of type `out_type`.
11423func ParseTensor(scope *Scope, serialized tf.Output, out_type tf.DataType) (output tf.Output) {
11424	if scope.Err() != nil {
11425		return
11426	}
11427	attrs := map[string]interface{}{"out_type": out_type}
11428	opspec := tf.OpSpec{
11429		Type: "ParseTensor",
11430		Input: []tf.Input{
11431			serialized,
11432		},
11433		Attrs: attrs,
11434	}
11435	op := scope.AddOperation(opspec)
11436	return op.Output(0)
11437}
11438
11439// MaxPoolWithArgmaxAttr is an optional argument to MaxPoolWithArgmax.
11440type MaxPoolWithArgmaxAttr func(optionalAttr)
11441
11442// MaxPoolWithArgmaxTargmax sets the optional Targmax attribute to value.
11443// If not specified, defaults to DT_INT64
11444func MaxPoolWithArgmaxTargmax(value tf.DataType) MaxPoolWithArgmaxAttr {
11445	return func(m optionalAttr) {
11446		m["Targmax"] = value
11447	}
11448}
11449
11450// MaxPoolWithArgmaxIncludeBatchInIndex sets the optional include_batch_in_index attribute to value.
11451//
11452// value: Whether to include batch dimension in flattened index of `argmax`.
11453// If not specified, defaults to false
11454func MaxPoolWithArgmaxIncludeBatchInIndex(value bool) MaxPoolWithArgmaxAttr {
11455	return func(m optionalAttr) {
11456		m["include_batch_in_index"] = value
11457	}
11458}
11459
11460// Performs max pooling on the input and outputs both max values and indices.
11461//
11462// The indices in `argmax` are flattened, so that a maximum value at position
11463// `[b, y, x, c]` becomes flattened index:
11464// `(y * width + x) * channels + c` if `include_batch_in_index` is False;
11465// `((b * height + y) * width + x) * channels + c` if `include_batch_in_index` is True.
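//
// For example (editor's worked note), for an NHWC input with height=6, width=5,
// channels=4, a maximum at `[b=1, y=2, x=3, c=0]` flattens to:
//   (2*5+3)*4 + 0 = 52            if `include_batch_in_index` is False;
//   ((1*6+2)*5+3)*4 + 0 = 172     if `include_batch_in_index` is True.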
11466//
11467// The indices returned are always in `[0, height) x [0, width)` before flattening,
11468// even if padding is involved and the mathematically correct answer is outside
// (either negative or too large).  This is a bug, but fixing it is difficult to do
// in a safe, backwards-compatible way, especially due to flattening.
11471//
11472// Arguments:
11473//	input: 4-D with shape `[batch, height, width, channels]`.  Input to pool over.
11474//	ksize: The size of the window for each dimension of the input tensor.
11475//	strides: The stride of the sliding window for each dimension of the
11476// input tensor.
11477//	padding: The type of padding algorithm to use.
11478//
// Returns:
//	output: The max pooled output tensor.
//	argmax: 4-D.  The flattened indices of the max values chosen for each output.
11480func MaxPoolWithArgmax(scope *Scope, input tf.Output, ksize []int64, strides []int64, padding string, optional ...MaxPoolWithArgmaxAttr) (output tf.Output, argmax tf.Output) {
11481	if scope.Err() != nil {
11482		return
11483	}
11484	attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
11485	for _, a := range optional {
11486		a(attrs)
11487	}
11488	opspec := tf.OpSpec{
11489		Type: "MaxPoolWithArgmax",
11490		Input: []tf.Input{
11491			input,
11492		},
11493		Attrs: attrs,
11494	}
11495	op := scope.AddOperation(opspec)
11496	return op.Output(0), op.Output(1)
11497}
11498
11499// ModelDatasetAttr is an optional argument to ModelDataset.
11500type ModelDatasetAttr func(optionalAttr)
11501
11502// ModelDatasetCpuBudget sets the optional cpu_budget attribute to value.
11503// If not specified, defaults to 0
11504func ModelDatasetCpuBudget(value int64) ModelDatasetAttr {
11505	return func(m optionalAttr) {
11506		m["cpu_budget"] = value
11507	}
11508}
11509
// Identity transformation that models performance.
11513//
11514// Arguments:
11515//	input_dataset: A variant tensor representing the input dataset.
11516//
11517//
11518func ModelDataset(scope *Scope, input_dataset tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...ModelDatasetAttr) (handle tf.Output) {
11519	if scope.Err() != nil {
11520		return
11521	}
11522	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
11523	for _, a := range optional {
11524		a(attrs)
11525	}
11526	opspec := tf.OpSpec{
11527		Type: "ModelDataset",
11528		Input: []tf.Input{
11529			input_dataset,
11530		},
11531		Attrs: attrs,
11532	}
11533	op := scope.AddOperation(opspec)
11534	return op.Output(0)
11535}
11536
11537// Fast Fourier transform.
11538//
11539// Computes the 1-dimensional discrete Fourier transform over the inner-most
11540// dimension of `input`.
11541//
11542// Arguments:
11543//	input: A complex tensor.
11544//
11545// Returns A complex tensor of the same shape as `input`. The inner-most
11546//   dimension of `input` is replaced with its 1D Fourier transform.
11547//
11548// @compatibility(numpy)
11549// Equivalent to np.fft.fft
11550// @end_compatibility
11551func FFT(scope *Scope, input tf.Output) (output tf.Output) {
11552	if scope.Err() != nil {
11553		return
11554	}
11555	opspec := tf.OpSpec{
11556		Type: "FFT",
11557		Input: []tf.Input{
11558			input,
11559		},
11560	}
11561	op := scope.AddOperation(opspec)
11562	return op.Output(0)
11563}
11564
11565// MaxPoolAttr is an optional argument to MaxPool.
11566type MaxPoolAttr func(optionalAttr)
11567
11568// MaxPoolDataFormat sets the optional data_format attribute to value.
11569//
11570// value: Specify the data format of the input and output data. With the
11571// default format "NHWC", the data is stored in the order of:
11572//     [batch, in_height, in_width, in_channels].
11573// Alternatively, the format could be "NCHW", the data storage order of:
11574//     [batch, in_channels, in_height, in_width].
11575// If not specified, defaults to "NHWC"
11576func MaxPoolDataFormat(value string) MaxPoolAttr {
11577	return func(m optionalAttr) {
11578		m["data_format"] = value
11579	}
11580}
11581
11582// Performs max pooling on the input.
11583//
11584// Arguments:
11585//	input: 4-D input to pool over.
11586//	ksize: The size of the window for each dimension of the input tensor.
11587//	strides: The stride of the sliding window for each dimension of the
11588// input tensor.
11589//	padding: The type of padding algorithm to use.
11590//
11591// Returns The max pooled output tensor.
11592func MaxPool(scope *Scope, input tf.Output, ksize []int64, strides []int64, padding string, optional ...MaxPoolAttr) (output tf.Output) {
11593	if scope.Err() != nil {
11594		return
11595	}
11596	attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
11597	for _, a := range optional {
11598		a(attrs)
11599	}
11600	opspec := tf.OpSpec{
11601		Type: "MaxPool",
11602		Input: []tf.Input{
11603			input,
11604		},
11605		Attrs: attrs,
11606	}
11607	op := scope.AddOperation(opspec)
11608	return op.Output(0)
11609}
11610
11611// Multiplies sparse updates into the variable referenced by `resource`.
11612//
11613// This operation computes
11614//
11615//     # Scalar indices
11616//     ref[indices, ...] *= updates[...]
11617//
11618//     # Vector indices (for each i)
11619//     ref[indices[i], ...] *= updates[i, ...]
11620//
11621//     # High rank indices (for each i, ..., j)
11622//     ref[indices[i, ..., j], ...] *= updates[i, ..., j, ...]
11623//
11624// Duplicate entries are handled correctly: if multiple `indices` reference
11625// the same location, their contributions multiply.
11626//
11627// Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.
11628//
11629// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
11630// <img style="width:100%" src='https://www.tensorflow.org/images/ScatterAdd.png' alt>
11631// </div>
11632//
11633// Arguments:
11634//	resource: Should be from a `Variable` node.
11635//	indices: A tensor of indices into the first dimension of `ref`.
//	updates: A tensor of updated values to multiply into `ref`.
11637//
11638// Returns the created operation.
11639func ResourceScatterMul(scope *Scope, resource tf.Output, indices tf.Output, updates tf.Output) (o *tf.Operation) {
11640	if scope.Err() != nil {
11641		return
11642	}
11643	opspec := tf.OpSpec{
11644		Type: "ResourceScatterMul",
11645		Input: []tf.Input{
11646			resource, indices, updates,
11647		},
11648	}
11649	return scope.AddOperation(opspec)
11650}
11651
11652// Subtracts sparse updates from the variable referenced by `resource`.
11653//
11654// This operation computes
11655//
11656//     # Scalar indices
11657//     ref[indices, ...] -= updates[...]
11658//
11659//     # Vector indices (for each i)
11660//     ref[indices[i], ...] -= updates[i, ...]
11661//
11662//     # High rank indices (for each i, ..., j)
11663//     ref[indices[i, ..., j], ...] -= updates[i, ..., j, ...]
11664//
11665// Duplicate entries are handled correctly: if multiple `indices` reference
11666// the same location, their contributions add.
11667//
11668// Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.
11669//
11670// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
11671// <img style="width:100%" src='https://www.tensorflow.org/images/ScatterAdd.png' alt>
11672// </div>
11673//
11674// Arguments:
11675//	resource: Should be from a `Variable` node.
11676//	indices: A tensor of indices into the first dimension of `ref`.
//	updates: A tensor of updated values to subtract from `ref`.
11678//
11679// Returns the created operation.
11680func ResourceScatterSub(scope *Scope, resource tf.Output, indices tf.Output, updates tf.Output) (o *tf.Operation) {
11681	if scope.Err() != nil {
11682		return
11683	}
11684	opspec := tf.OpSpec{
11685		Type: "ResourceScatterSub",
11686		Input: []tf.Input{
11687			resource, indices, updates,
11688		},
11689	}
11690	return scope.AddOperation(opspec)
11691}
11692
11693// Adds sparse updates to the variable referenced by `resource`.
11694//
11695// This operation computes
11696//
11697//     # Scalar indices
11698//     ref[indices, ...] += updates[...]
11699//
11700//     # Vector indices (for each i)
11701//     ref[indices[i], ...] += updates[i, ...]
11702//
11703//     # High rank indices (for each i, ..., j)
11704//     ref[indices[i, ..., j], ...] += updates[i, ..., j, ...]
11705//
11706// Duplicate entries are handled correctly: if multiple `indices` reference
11707// the same location, their contributions add.
11708//
11709// Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.
11710//
11711// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
11712// <img style="width:100%" src='https://www.tensorflow.org/images/ScatterAdd.png' alt>
11713// </div>
11714//
11715// Arguments:
11716//	resource: Should be from a `Variable` node.
11717//	indices: A tensor of indices into the first dimension of `ref`.
11718//	updates: A tensor of updated values to add to `ref`.
11719//
11720// Returns the created operation.
11721func ResourceScatterAdd(scope *Scope, resource tf.Output, indices tf.Output, updates tf.Output) (o *tf.Operation) {
11722	if scope.Err() != nil {
11723		return
11724	}
11725	opspec := tf.OpSpec{
11726		Type: "ResourceScatterAdd",
11727		Input: []tf.Input{
11728			resource, indices, updates,
11729		},
11730	}
11731	return scope.AddOperation(opspec)
11732}
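
// Editor's note: a sketch, not generated code, showing duplicate indices
// accumulating; VarHandleOp, AssignVariableOp, NewScope and Const are assumed to
// be the wrappers defined elsewhere in this package.
//
// ```go
// s := NewScope()
// v := VarHandleOp(s, tf.Float, tf.MakeShape(4))
// init := AssignVariableOp(s, v, Const(s, []float32{0, 0, 0, 0}))
// // Index 1 appears twice, so its updates add up: after a session runs init and
// // then add, the variable holds [0, 3, 0, 5].
// add := ResourceScatterAdd(s, v, Const(s, []int32{1, 3, 1}), Const(s, []float32{1, 5, 2}))
// _, _ = init, add
// ```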
11733
11734// Reads the value of a variable.
11735//
11736// The tensor returned by this operation is immutable.
11737//
11738// The value returned by this operation is guaranteed to be influenced by all the
11739// writes on which this operation depends directly or indirectly, and to not be
11740// influenced by any of the writes which depend directly or indirectly on this
11741// operation.
11742//
11743// Arguments:
11744//	resource: handle to the resource in which to store the variable.
11745//	dtype: the dtype of the value.
11746func ReadVariableOp(scope *Scope, resource tf.Output, dtype tf.DataType) (value tf.Output) {
11747	if scope.Err() != nil {
11748		return
11749	}
11750	attrs := map[string]interface{}{"dtype": dtype}
11751	opspec := tf.OpSpec{
11752		Type: "ReadVariableOp",
11753		Input: []tf.Input{
11754			resource,
11755		},
11756		Attrs: attrs,
11757	}
11758	op := scope.AddOperation(opspec)
11759	return op.Output(0)
11760}
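
// Editor's note: a sketch, not generated code; VarHandleOp, AssignVariableOp,
// NewScope and Const are assumed from elsewhere in this package, and a session
// must run the write before (or as a dependency of) the read for the assignment
// to be visible.
//
// ```go
// s := NewScope()
// handle := VarHandleOp(s, tf.Float, tf.ScalarShape())
// write := AssignVariableOp(s, handle, Const(s, float32(3)))
// value := ReadVariableOp(s, handle, tf.Float)
// _, _ = write, value
// ```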
11761
11762// ResourceSparseApplyProximalAdagradAttr is an optional argument to ResourceSparseApplyProximalAdagrad.
11763type ResourceSparseApplyProximalAdagradAttr func(optionalAttr)
11764
11765// ResourceSparseApplyProximalAdagradUseLocking sets the optional use_locking attribute to value.
11766//
11767// value: If True, updating of the var and accum tensors will be protected by
11768// a lock; otherwise the behavior is undefined, but may exhibit less contention.
11769// If not specified, defaults to false
11770func ResourceSparseApplyProximalAdagradUseLocking(value bool) ResourceSparseApplyProximalAdagradAttr {
11771	return func(m optionalAttr) {
11772		m["use_locking"] = value
11773	}
11774}
11775
11776// Sparse update entries in '*var' and '*accum' according to FOBOS algorithm.
11777//
// That is, for the rows we have grad for, we update var and accum as follows:
11779// accum += grad * grad
11780// prox_v = var
11781// prox_v -= lr * grad * (1 / sqrt(accum))
11782// var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0}
11783//
11784// Arguments:
11785//	var_: Should be from a Variable().
11786//	accum: Should be from a Variable().
11787//	lr: Learning rate. Must be a scalar.
11788//	l1: L1 regularization. Must be a scalar.
11789//	l2: L2 regularization. Must be a scalar.
11790//	grad: The gradient.
11791//	indices: A vector of indices into the first dimension of var and accum.
11792//
11793// Returns the created operation.
11794func ResourceSparseApplyProximalAdagrad(scope *Scope, var_ tf.Output, accum tf.Output, lr tf.Output, l1 tf.Output, l2 tf.Output, grad tf.Output, indices tf.Output, optional ...ResourceSparseApplyProximalAdagradAttr) (o *tf.Operation) {
11795	if scope.Err() != nil {
11796		return
11797	}
11798	attrs := map[string]interface{}{}
11799	for _, a := range optional {
11800		a(attrs)
11801	}
11802	opspec := tf.OpSpec{
11803		Type: "ResourceSparseApplyProximalAdagrad",
11804		Input: []tf.Input{
11805			var_, accum, lr, l1, l2, grad, indices,
11806		},
11807		Attrs: attrs,
11808	}
11809	return scope.AddOperation(opspec)
11810}
11811
11812// DecodeJpegAttr is an optional argument to DecodeJpeg.
11813type DecodeJpegAttr func(optionalAttr)
11814
11815// DecodeJpegChannels sets the optional channels attribute to value.
11816//
11817// value: Number of color channels for the decoded image.
11818// If not specified, defaults to 0
11819func DecodeJpegChannels(value int64) DecodeJpegAttr {
11820	return func(m optionalAttr) {
11821		m["channels"] = value
11822	}
11823}
11824
11825// DecodeJpegRatio sets the optional ratio attribute to value.
11826//
11827// value: Downscaling ratio.
11828// If not specified, defaults to 1
11829func DecodeJpegRatio(value int64) DecodeJpegAttr {
11830	return func(m optionalAttr) {
11831		m["ratio"] = value
11832	}
11833}
11834
11835// DecodeJpegFancyUpscaling sets the optional fancy_upscaling attribute to value.
11836//
11837// value: If true use a slower but nicer upscaling of the
11838// chroma planes (yuv420/422 only).
11839// If not specified, defaults to true
11840func DecodeJpegFancyUpscaling(value bool) DecodeJpegAttr {
11841	return func(m optionalAttr) {
11842		m["fancy_upscaling"] = value
11843	}
11844}
11845
11846// DecodeJpegTryRecoverTruncated sets the optional try_recover_truncated attribute to value.
11847//
11848// value: If true try to recover an image from truncated input.
11849// If not specified, defaults to false
11850func DecodeJpegTryRecoverTruncated(value bool) DecodeJpegAttr {
11851	return func(m optionalAttr) {
11852		m["try_recover_truncated"] = value
11853	}
11854}
11855
11856// DecodeJpegAcceptableFraction sets the optional acceptable_fraction attribute to value.
11857//
11858// value: The minimum required fraction of lines before a truncated
11859// input is accepted.
11860// If not specified, defaults to 1
11861func DecodeJpegAcceptableFraction(value float32) DecodeJpegAttr {
11862	return func(m optionalAttr) {
11863		m["acceptable_fraction"] = value
11864	}
11865}
11866
11867// DecodeJpegDctMethod sets the optional dct_method attribute to value.
11868//
11869// value: string specifying a hint about the algorithm used for
11870// decompression.  Defaults to "" which maps to a system-specific
11871// default.  Currently valid values are ["INTEGER_FAST",
11872// "INTEGER_ACCURATE"].  The hint may be ignored (e.g., the internal
11873// jpeg library changes to a version that does not have that specific
11874// option.)
11875// If not specified, defaults to ""
11876func DecodeJpegDctMethod(value string) DecodeJpegAttr {
11877	return func(m optionalAttr) {
11878		m["dct_method"] = value
11879	}
11880}
11881
11882// Decode a JPEG-encoded image to a uint8 tensor.
11883//
11884// The attr `channels` indicates the desired number of color channels for the
11885// decoded image.
11886//
11887// Accepted values are:
11888//
11889// *   0: Use the number of channels in the JPEG-encoded image.
11890// *   1: output a grayscale image.
11891// *   3: output an RGB image.
11892//
11893// If needed, the JPEG-encoded image is transformed to match the requested number
11894// of color channels.
11895//
11896// The attr `ratio` allows downscaling the image by an integer factor during
11897// decoding.  Allowed values are: 1, 2, 4, and 8.  This is much faster than
11898// downscaling the image later.
11899//
11900//
11901// This op also supports decoding PNGs and non-animated GIFs since the interface is
11902// the same, though it is cleaner to use `tf.image.decode_image`.
11903//
11904// Arguments:
11905//	contents: 0-D.  The JPEG-encoded image.
11906//
// Returns 3-D with shape `[height, width, channels]`.
11908func DecodeJpeg(scope *Scope, contents tf.Output, optional ...DecodeJpegAttr) (image tf.Output) {
11909	if scope.Err() != nil {
11910		return
11911	}
11912	attrs := map[string]interface{}{}
11913	for _, a := range optional {
11914		a(attrs)
11915	}
11916	opspec := tf.OpSpec{
11917		Type: "DecodeJpeg",
11918		Input: []tf.Input{
11919			contents,
11920		},
11921		Attrs: attrs,
11922	}
11923	op := scope.AddOperation(opspec)
11924	return op.Output(0)
11925}
11926
11927// DepthwiseConv2dNativeBackpropInputAttr is an optional argument to DepthwiseConv2dNativeBackpropInput.
11928type DepthwiseConv2dNativeBackpropInputAttr func(optionalAttr)
11929
11930// DepthwiseConv2dNativeBackpropInputDataFormat sets the optional data_format attribute to value.
11931//
11932// value: Specify the data format of the input and output data. With the
11933// default format "NHWC", the data is stored in the order of:
11934//     [batch, height, width, channels].
11935// Alternatively, the format could be "NCHW", the data storage order of:
11936//     [batch, channels, height, width].
11937// If not specified, defaults to "NHWC"
11938func DepthwiseConv2dNativeBackpropInputDataFormat(value string) DepthwiseConv2dNativeBackpropInputAttr {
11939	return func(m optionalAttr) {
11940		m["data_format"] = value
11941	}
11942}
11943
11944// DepthwiseConv2dNativeBackpropInputDilations sets the optional dilations attribute to value.
11945//
11946// value: 1-D tensor of length 4.  The dilation factor for each dimension of
11947// `input`. If set to k > 1, there will be k-1 skipped cells between each filter
11948// element on that dimension. The dimension order is determined by the value of
11949// `data_format`, see above for details. Dilations in the batch and depth
11950// dimensions must be 1.
// If not specified, defaults to [1, 1, 1, 1]
11952func DepthwiseConv2dNativeBackpropInputDilations(value []int64) DepthwiseConv2dNativeBackpropInputAttr {
11953	return func(m optionalAttr) {
11954		m["dilations"] = value
11955	}
11956}
11957
11958// Computes the gradients of depthwise convolution with respect to the input.
11959//
11960// Arguments:
11961//	input_sizes: An integer vector representing the shape of `input`, based
11962// on `data_format`.  For example, if `data_format` is 'NHWC' then
11963//  `input` is a 4-D `[batch, height, width, channels]` tensor.
11964//	filter: 4-D with shape
11965// `[filter_height, filter_width, in_channels, depthwise_multiplier]`.
11966//	out_backprop: 4-D with shape  based on `data_format`.
11967// For example, if `data_format` is 'NHWC' then
11968// out_backprop shape is `[batch, out_height, out_width, out_channels]`.
11969// Gradients w.r.t. the output of the convolution.
11970//	strides: The stride of the sliding window for each dimension of the input
11971// of the convolution.
11972//	padding: The type of padding algorithm to use.
11973//
11974// Returns 4-D with shape according to `data_format`.  For example, if
11975// `data_format` is 'NHWC', output shape is `[batch, in_height,
11976// in_width, in_channels]`.  Gradient w.r.t. the input of the
11977// convolution.
11978func DepthwiseConv2dNativeBackpropInput(scope *Scope, input_sizes tf.Output, filter tf.Output, out_backprop tf.Output, strides []int64, padding string, optional ...DepthwiseConv2dNativeBackpropInputAttr) (output tf.Output) {
11979	if scope.Err() != nil {
11980		return
11981	}
11982	attrs := map[string]interface{}{"strides": strides, "padding": padding}
11983	for _, a := range optional {
11984		a(attrs)
11985	}
11986	opspec := tf.OpSpec{
11987		Type: "DepthwiseConv2dNativeBackpropInput",
11988		Input: []tf.Input{
11989			input_sizes, filter, out_backprop,
11990		},
11991		Attrs: attrs,
11992	}
11993	op := scope.AddOperation(opspec)
11994	return op.Output(0)
11995}
11996
11997// EditDistanceAttr is an optional argument to EditDistance.
11998type EditDistanceAttr func(optionalAttr)
11999
12000// EditDistanceNormalize sets the optional normalize attribute to value.
12001//
// value: boolean (if true, edit distances are normalized by the length of truth).
// If not specified, defaults to true
12006func EditDistanceNormalize(value bool) EditDistanceAttr {
12007	return func(m optionalAttr) {
12008		m["normalize"] = value
12009	}
12010}
12011
12012// Computes the (possibly normalized) Levenshtein Edit Distance.
12013//
12014// The inputs are variable-length sequences provided by SparseTensors
12015//   (hypothesis_indices, hypothesis_values, hypothesis_shape)
12016// and
12017//   (truth_indices, truth_values, truth_shape).
12018//
12019// The inputs are:
12020//
12021// Arguments:
12022//	hypothesis_indices: The indices of the hypothesis list SparseTensor.
12023// This is an N x R int64 matrix.
12024//	hypothesis_values: The values of the hypothesis list SparseTensor.
12025// This is an N-length vector.
12026//	hypothesis_shape: The shape of the hypothesis list SparseTensor.
12027// This is an R-length vector.
12028//	truth_indices: The indices of the truth list SparseTensor.
12029// This is an M x R int64 matrix.
12030//	truth_values: The values of the truth list SparseTensor.
12031// This is an M-length vector.
//	truth_shape: The shape of the truth list SparseTensor.
// This is an R-length vector.
12033//
12034// Returns A dense float tensor with rank R - 1.
12035//
12036// For the example input:
12037//
12038//     // hypothesis represents a 2x1 matrix with variable-length values:
12039//     //   (0,0) = ["a"]
12040//     //   (1,0) = ["b"]
12041//     hypothesis_indices = [[0, 0, 0],
12042//                           [1, 0, 0]]
12043//     hypothesis_values = ["a", "b"]
12044//     hypothesis_shape = [2, 1, 1]
12045//
12046//     // truth represents a 2x2 matrix with variable-length values:
12047//     //   (0,0) = []
12048//     //   (0,1) = ["a"]
12049//     //   (1,0) = ["b", "c"]
12050//     //   (1,1) = ["a"]
12051//     truth_indices = [[0, 1, 0],
12052//                      [1, 0, 0],
12053//                      [1, 0, 1],
12054//                      [1, 1, 0]]
12055//     truth_values = ["a", "b", "c", "a"]
12056//     truth_shape = [2, 2, 2]
12057//     normalize = true
12058//
12059// The output will be:
12060//
12061//     // output is a 2x2 matrix with edit distances normalized by truth lengths.
12062//     output = [[inf, 1.0],  // (0,0): no truth, (0,1): no hypothesis
12063//               [0.5, 1.0]]  // (1,0): addition, (1,1): no hypothesis
12064func EditDistance(scope *Scope, hypothesis_indices tf.Output, hypothesis_values tf.Output, hypothesis_shape tf.Output, truth_indices tf.Output, truth_values tf.Output, truth_shape tf.Output, optional ...EditDistanceAttr) (output tf.Output) {
12065	if scope.Err() != nil {
12066		return
12067	}
12068	attrs := map[string]interface{}{}
12069	for _, a := range optional {
12070		a(attrs)
12071	}
12072	opspec := tf.OpSpec{
12073		Type: "EditDistance",
12074		Input: []tf.Input{
12075			hypothesis_indices, hypothesis_values, hypothesis_shape, truth_indices, truth_values, truth_shape,
12076		},
12077		Attrs: attrs,
12078	}
12079	op := scope.AddOperation(opspec)
12080	return op.Output(0)
12081}
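
// Editor's sketch (not part of the generated API): wires the documented
// example above through EditDistance. NewScope and Const are this package's
// standard helpers; the values mirror the 2x1 hypothesis / 2x2 truth example.
func exampleEditDistance() (tf.Output, error) {
	s := NewScope()
	hypIndices := Const(s, [][]int64{{0, 0, 0}, {1, 0, 0}})
	hypValues := Const(s, []string{"a", "b"})
	hypShape := Const(s, []int64{2, 1, 1})
	truthIndices := Const(s, [][]int64{{0, 1, 0}, {1, 0, 0}, {1, 0, 1}, {1, 1, 0}})
	truthValues := Const(s, []string{"a", "b", "c", "a"})
	truthShape := Const(s, []int64{2, 2, 2})
	// Normalizing by truth length reproduces the [[inf, 1.0], [0.5, 1.0]] output above.
	out := EditDistance(s, hypIndices, hypValues, hypShape, truthIndices, truthValues, truthShape, EditDistanceNormalize(true))
	return out, s.Err()
}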
12082
12083// Returns 0 if x == 0, and x * log(y) otherwise, elementwise.
12084func Xlogy(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
12085	if scope.Err() != nil {
12086		return
12087	}
12088	opspec := tf.OpSpec{
12089		Type: "Xlogy",
12090		Input: []tf.Input{
12091			x, y,
12092		},
12093	}
12094	op := scope.AddOperation(opspec)
12095	return op.Output(0)
12096}
12097
12098// Stops gradient computation.
12099//
12100// When executed in a graph, this op outputs its input tensor as-is.
12101//
// When building ops to compute gradients, this op prevents the contribution of
// its inputs from being taken into account.  Normally, the gradient generator adds ops
// to a graph to compute the derivatives of a specified 'loss' by recursively
// finding the inputs that contributed to its computation.  If you insert this op
// in the graph, its inputs are masked from the gradient generator.  They are not
// taken into account for computing gradients.
12108//
12109// This is useful any time you want to compute a value with TensorFlow but need
12110// to pretend that the value was a constant. Some examples include:
12111//
12112// *  The *EM* algorithm where the *M-step* should not involve backpropagation
12113//    through the output of the *E-step*.
12114// *  Contrastive divergence training of Boltzmann machines where, when
12115//    differentiating the energy function, the training must not backpropagate
12116//    through the graph that generated the samples from the model.
12117// *  Adversarial training, where no backprop should happen through the adversarial
12118//    example generation process.
12119func StopGradient(scope *Scope, input tf.Output) (output tf.Output) {
12120	if scope.Err() != nil {
12121		return
12122	}
12123	opspec := tf.OpSpec{
12124		Type: "StopGradient",
12125		Input: []tf.Input{
12126			input,
12127		},
12128	}
12129	op := scope.AddOperation(opspec)
12130	return op.Output(0)
12131}
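
// Editor's sketch (not part of the generated API): StopGradient is an identity
// in the forward pass, so y below evaluates to the same values as x, but any
// gradient flowing back through y treats x as a constant. NewScope and Const
// are this package's standard helpers.
func exampleStopGradient() (tf.Output, error) {
	s := NewScope()
	x := Const(s, []float32{1, 2, 3})
	y := StopGradient(s, x) // forward value == x; masked from the gradient generator
	return y, s.Err()
}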
12132
// Eagerly executes a Python function to compute func(input)->output.
//
// The semantics of the input, output, and attributes are the same as those for
// PyFunc.
12137func EagerPyFunc(scope *Scope, input []tf.Output, token string, Tout []tf.DataType) (output []tf.Output) {
12138	if scope.Err() != nil {
12139		return
12140	}
12141	attrs := map[string]interface{}{"token": token, "Tout": Tout}
12142	opspec := tf.OpSpec{
12143		Type: "EagerPyFunc",
12144		Input: []tf.Input{
12145			tf.OutputList(input),
12146		},
12147		Attrs: attrs,
12148	}
12149	op := scope.AddOperation(opspec)
12150	if scope.Err() != nil {
12151		return
12152	}
12153	var idx int
12154	var err error
12155	if output, idx, err = makeOutputList(op, idx, "output"); err != nil {
12156		scope.UpdateErr("EagerPyFunc", err)
12157		return
12158	}
12159	return output
12160}
12161
// Concatenates all tensors in the list along the 0th dimension.
12163//
12164// Requires that all tensors have the same shape except the first dimension.
12165//
12166// input_handle: The input list.
12167// element_shape: The shape of the uninitialized elements in the list. If the first
12168//   dimension is not -1, it is assumed that all list elements have the same
12169//   leading dim.
12170// leading_dims: The list of leading dims of uninitialized list elements. Used if
12171//   the leading dim of input_handle.element_shape or the element_shape input arg
12172//   is not already set.
// tensor: The concatenated result.
12174// lengths: Output tensor containing sizes of the 0th dimension of tensors in the list, used for computing the gradient.
12175//
12176func TensorListConcatV2(scope *Scope, input_handle tf.Output, element_shape tf.Output, leading_dims tf.Output, element_dtype tf.DataType) (tensor tf.Output, lengths tf.Output) {
12177	if scope.Err() != nil {
12178		return
12179	}
12180	attrs := map[string]interface{}{"element_dtype": element_dtype}
12181	opspec := tf.OpSpec{
12182		Type: "TensorListConcatV2",
12183		Input: []tf.Input{
12184			input_handle, element_shape, leading_dims,
12185		},
12186		Attrs: attrs,
12187	}
12188	op := scope.AddOperation(opspec)
12189	return op.Output(0), op.Output(1)
12190}
12191
12192// MatrixTriangularSolveAttr is an optional argument to MatrixTriangularSolve.
12193type MatrixTriangularSolveAttr func(optionalAttr)
12194
12195// MatrixTriangularSolveLower sets the optional lower attribute to value.
12196//
12197// value: Boolean indicating whether the innermost matrices in `matrix` are
12198// lower or upper triangular.
12199// If not specified, defaults to true
12200func MatrixTriangularSolveLower(value bool) MatrixTriangularSolveAttr {
12201	return func(m optionalAttr) {
12202		m["lower"] = value
12203	}
12204}
12205
12206// MatrixTriangularSolveAdjoint sets the optional adjoint attribute to value.
12207//
12208// value: Boolean indicating whether to solve with `matrix` or its (block-wise)
12209//          adjoint.
12210//
12211// @compatibility(numpy)
12212// Equivalent to scipy.linalg.solve_triangular
12213// @end_compatibility
12214// If not specified, defaults to false
12215func MatrixTriangularSolveAdjoint(value bool) MatrixTriangularSolveAttr {
12216	return func(m optionalAttr) {
12217		m["adjoint"] = value
12218	}
12219}
12220
12221// Solves systems of linear equations with upper or lower triangular matrices by
12222//
12223// backsubstitution.
12224//
12225// `matrix` is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions form
12226// square matrices. If `lower` is `True` then the strictly upper triangular part
12227// of each inner-most matrix is assumed to be zero and not accessed.
// If `lower` is `False` then the strictly lower triangular part of each inner-most
12229// matrix is assumed to be zero and not accessed.
12230// `rhs` is a tensor of shape `[..., M, K]`.
12231//
// The output is a tensor of shape `[..., M, K]`. If `adjoint` is
// `False` then the innermost matrices in `output` satisfy matrix equations
// `matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]`.
// If `adjoint` is `True` then the innermost matrices in
// `output` satisfy matrix equations
// `adjoint(matrix[..., i, k]) * output[..., k, j] = rhs[..., i, j]`.
12238//
12239// Arguments:
12240//	matrix: Shape is `[..., M, M]`.
12241//	rhs: Shape is `[..., M, K]`.
12242//
12243// Returns Shape is `[..., M, K]`.
12244func MatrixTriangularSolve(scope *Scope, matrix tf.Output, rhs tf.Output, optional ...MatrixTriangularSolveAttr) (output tf.Output) {
12245	if scope.Err() != nil {
12246		return
12247	}
12248	attrs := map[string]interface{}{}
12249	for _, a := range optional {
12250		a(attrs)
12251	}
12252	opspec := tf.OpSpec{
12253		Type: "MatrixTriangularSolve",
12254		Input: []tf.Input{
12255			matrix, rhs,
12256		},
12257		Attrs: attrs,
12258	}
12259	op := scope.AddOperation(opspec)
12260	return op.Output(0)
12261}
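
// Editor's sketch (not part of the generated API): solves one lower-triangular
// 2x2 system L*x = b by backsubstitution, using the functional-option
// attributes defined above. NewScope and Const are this package's helpers.
func exampleMatrixTriangularSolve() (tf.Output, error) {
	s := NewScope()
	matrix := Const(s, [][]float32{{2, 0}, {3, 4}}) // lower triangular L
	rhs := Const(s, [][]float32{{2}, {11}})         // expect x = [[1], [2]]
	x := MatrixTriangularSolve(s, matrix, rhs,
		MatrixTriangularSolveLower(true), MatrixTriangularSolveAdjoint(false))
	return x, s.Err()
}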
12262
12263// Saves tensors in V2 checkpoint format.
12264//
12265// By default, saves the named tensors in full.  If the caller wishes to save
12266// specific slices of full tensors, "shape_and_slices" should be non-empty strings
12267// and correspondingly well-formed.
12268//
12269// Arguments:
12270//	prefix: Must have a single element. The prefix of the V2 checkpoint to which we
12271// write the tensors.
12272//	tensor_names: shape {N}. The names of the tensors to be saved.
12273//	shape_and_slices: shape {N}.  The slice specs of the tensors to be saved.
12274// Empty strings indicate that they are non-partitioned tensors.
12275//	tensors: `N` tensors to save.
12276//
12277// Returns the created operation.
12278func SaveV2(scope *Scope, prefix tf.Output, tensor_names tf.Output, shape_and_slices tf.Output, tensors []tf.Output) (o *tf.Operation) {
12279	if scope.Err() != nil {
12280		return
12281	}
12282	opspec := tf.OpSpec{
12283		Type: "SaveV2",
12284		Input: []tf.Input{
12285			prefix, tensor_names, shape_and_slices, tf.OutputList(tensors),
12286		},
12287	}
12288	return scope.AddOperation(opspec)
12289}
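
// Editor's sketch (not part of the generated API): saves two full tensors to a
// V2 checkpoint. Empty slice specs mean "save in full"; the checkpoint prefix
// is a hypothetical placeholder path.
func exampleSaveV2() (*tf.Operation, error) {
	s := NewScope()
	prefix := Const(s, "/tmp/ckpt/model") // hypothetical checkpoint prefix
	names := Const(s, []string{"w", "b"})
	slices := Const(s, []string{"", ""}) // empty => non-partitioned tensors
	w := Const(s, [][]float32{{1, 2}, {3, 4}})
	b := Const(s, []float32{0.5, 0.5})
	return SaveV2(s, prefix, names, slices, []tf.Output{w, b}), s.Err()
}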
12290
12291// Concatenates quantized tensors along one dimension.
12292//
12293// Arguments:
12294//	concat_dim: 0-D.  The dimension along which to concatenate.  Must be in the
12295// range [0, rank(values)).
12296//	values: The `N` Tensors to concatenate. Their ranks and types must match,
12297// and their sizes must match in all dimensions except `concat_dim`.
12298//	input_mins: The minimum scalar values for each of the input tensors.
12299//	input_maxes: The maximum scalar values for each of the input tensors.
12300//
// Returns A `Tensor` with the concatenation of values stacked along the
// `concat_dim` dimension (this tensor's shape matches that of `values` except
// in `concat_dim`, where it has the sum of the sizes), the float value that the
// minimum quantized output value represents, and the float value that the
// maximum quantized output value represents.
12304func QuantizedConcat(scope *Scope, concat_dim tf.Output, values []tf.Output, input_mins []tf.Output, input_maxes []tf.Output) (output tf.Output, output_min tf.Output, output_max tf.Output) {
12305	if scope.Err() != nil {
12306		return
12307	}
12308	opspec := tf.OpSpec{
12309		Type: "QuantizedConcat",
12310		Input: []tf.Input{
12311			concat_dim, tf.OutputList(values), tf.OutputList(input_mins), tf.OutputList(input_maxes),
12312		},
12313	}
12314	op := scope.AddOperation(opspec)
12315	return op.Output(0), op.Output(1), op.Output(2)
12316}
12317
12318// Slice a `SparseTensor` based on the `start` and `size`.
12319//
12320// For example, if the input is
12321//
12322//     input_tensor = shape = [2, 7]
12323//     [    a   d e  ]
12324//     [b c          ]
12325//
12326// Graphically the output tensors are:
12327//
12328//     sparse_slice([0, 0], [2, 4]) = shape = [2, 4]
12329//     [    a  ]
12330//     [b c    ]
12331//
12332//     sparse_slice([0, 4], [2, 3]) = shape = [2, 3]
12333//     [ d e  ]
12334//     [      ]
12335//
12336// Arguments:
//	indices: 2-D tensor representing the indices of the sparse tensor.
//	values: 1-D tensor representing the values of the sparse tensor.
//	shape: 1-D tensor representing the shape of the sparse tensor.
//	start: 1-D tensor representing the start of the slice.
//	size: 1-D tensor representing the size of the slice.
//
// Returns the indices of the output sparse tensor, the values of the output
// sparse tensor, and the shape of the output sparse tensor.
12348func SparseSlice(scope *Scope, indices tf.Output, values tf.Output, shape tf.Output, start tf.Output, size tf.Output) (output_indices tf.Output, output_values tf.Output, output_shape tf.Output) {
12349	if scope.Err() != nil {
12350		return
12351	}
12352	opspec := tf.OpSpec{
12353		Type: "SparseSlice",
12354		Input: []tf.Input{
12355			indices, values, shape, start, size,
12356		},
12357	}
12358	op := scope.AddOperation(opspec)
12359	return op.Output(0), op.Output(1), op.Output(2)
12360}
12361
12362// Runs multiple additive regression ensemble predictors on input instances and
12363//
12364// computes the logits. It is designed to be used during prediction.
12365// It traverses all the trees and calculates the final score for each instance.
12366//
12367// Arguments:
12368//
12369//	bucketized_features: A list of rank 1 Tensors containing bucket id for each
12370// feature.
12371//	logits_dimension: scalar, dimension of the logits, to be used for partial logits
12372// shape.
12373//
12374// Returns Output rank 2 Tensor containing logits for each example.
12375func BoostedTreesPredict(scope *Scope, tree_ensemble_handle tf.Output, bucketized_features []tf.Output, logits_dimension int64) (logits tf.Output) {
12376	if scope.Err() != nil {
12377		return
12378	}
12379	attrs := map[string]interface{}{"logits_dimension": logits_dimension}
12380	opspec := tf.OpSpec{
12381		Type: "BoostedTreesPredict",
12382		Input: []tf.Input{
12383			tree_ensemble_handle, tf.OutputList(bucketized_features),
12384		},
12385		Attrs: attrs,
12386	}
12387	op := scope.AddOperation(opspec)
12388	return op.Output(0)
12389}
12390
12391// Pads a tensor with zeros.
12392//
12393// This operation pads a `input` with zeros according to the `paddings` you
12394// specify. `paddings` is an integer tensor with shape `[Dn, 2]`, where n is the
12395// rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates
12396// how many zeros to add before the contents of `input` in that dimension, and
12397// `paddings[D, 1]` indicates how many zeros to add after the contents of `input`
12398// in that dimension.
12399//
12400// The padded size of each dimension D of the output is:
12401//
12402// `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)`
12403//
12404// For example:
12405//
12406// ```
12407// # 't' is [[1, 1], [2, 2]]
12408// # 'paddings' is [[1, 1], [2, 2]]
12409// # rank of 't' is 2
12410// pad(t, paddings) ==> [[0, 0, 0, 0, 0, 0]
12411//                       [0, 0, 1, 1, 0, 0]
12412//                       [0, 0, 2, 2, 0, 0]
12413//                       [0, 0, 0, 0, 0, 0]]
12414// ```
12415//
12416func Pad(scope *Scope, input tf.Output, paddings tf.Output) (output tf.Output) {
12417	if scope.Err() != nil {
12418		return
12419	}
12420	opspec := tf.OpSpec{
12421		Type: "Pad",
12422		Input: []tf.Input{
12423			input, paddings,
12424		},
12425	}
12426	op := scope.AddOperation(opspec)
12427	return op.Output(0)
12428}
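
// Editor's sketch (not part of the generated API): reproduces the doc example
// above, padding a 2x2 tensor into the 4x6 result shown. NewScope and Const
// are this package's standard helpers.
func examplePad() (tf.Output, error) {
	s := NewScope()
	t := Const(s, [][]int32{{1, 1}, {2, 2}})
	paddings := Const(s, [][]int32{{1, 1}, {2, 2}}) // [before, after] per dimension
	return Pad(s, t, paddings), s.Err()
}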
12429
12430// Checks whether a resource handle-based variable has been initialized.
12431//
12432// Arguments:
12433//	resource: the input resource handle.
12434//
12435// Returns a scalar boolean which is true if the variable has been
12436// initialized.
12437func VarIsInitializedOp(scope *Scope, resource tf.Output) (is_initialized tf.Output) {
12438	if scope.Err() != nil {
12439		return
12440	}
12441	opspec := tf.OpSpec{
12442		Type: "VarIsInitializedOp",
12443		Input: []tf.Input{
12444			resource,
12445		},
12446	}
12447	op := scope.AddOperation(opspec)
12448	return op.Output(0)
12449}
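
// Editor's sketch (not part of the generated API): creates a resource handle
// via this package's generated VarHandleOp wrapper (assumed to take scope,
// dtype, and shape) and asks whether the variable behind it is initialized;
// the answer is false until an assign op runs.
func exampleVarIsInitialized() (tf.Output, error) {
	s := NewScope()
	handle := VarHandleOp(s, tf.Float, tf.MakeShape(2, 2))
	return VarIsInitializedOp(s, handle), s.Err()
}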
12450
12451// Returns the min of x and y (i.e. x < y ? x : y) element-wise.
12452//
12453// *NOTE*: `Minimum` supports broadcasting. More about broadcasting
12454// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
12455func Minimum(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
12456	if scope.Err() != nil {
12457		return
12458	}
12459	opspec := tf.OpSpec{
12460		Type: "Minimum",
12461		Input: []tf.Input{
12462			x, y,
12463		},
12464	}
12465	op := scope.AddOperation(opspec)
12466	return op.Output(0)
12467}
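
// Editor's sketch (not part of the generated API): element-wise minimum with
// broadcasting; the scalar y broadcasts against the vector x, yielding [1, 2, 2].
func exampleMinimum() (tf.Output, error) {
	s := NewScope()
	x := Const(s, []float32{1, 2, 3})
	y := Const(s, float32(2))
	return Minimum(s, x, y), s.Err()
}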
12468
// Computes scaled exponential linear: `scale * alpha * (exp(features) - 1)`
//
// if `features < 0`, `scale * features` otherwise.
12472//
12473// To be used together with
12474// `initializer = tf.variance_scaling_initializer(factor=1.0, mode='FAN_IN')`.
12475// For correct dropout, use `tf.contrib.nn.alpha_dropout`.
12476//
12477// See [Self-Normalizing Neural Networks](https://arxiv.org/abs/1706.02515)
12478func Selu(scope *Scope, features tf.Output) (activations tf.Output) {
12479	if scope.Err() != nil {
12480		return
12481	}
12482	opspec := tf.OpSpec{
12483		Type: "Selu",
12484		Input: []tf.Input{
12485			features,
12486		},
12487	}
12488	op := scope.AddOperation(opspec)
12489	return op.Output(0)
12490}
12491
12492// SetSizeAttr is an optional argument to SetSize.
12493type SetSizeAttr func(optionalAttr)
12494
12495// SetSizeValidateIndices sets the optional validate_indices attribute to value.
12496// If not specified, defaults to true
12497func SetSizeValidateIndices(value bool) SetSizeAttr {
12498	return func(m optionalAttr) {
12499		m["validate_indices"] = value
12500	}
12501}
12502
12503// Number of unique elements along last dimension of input `set`.
12504//
12505// Input `set` is a `SparseTensor` represented by `set_indices`, `set_values`,
// and `set_shape`. The last dimension contains values in a set; duplicates are
12507// allowed but ignored.
12508//
12509// If `validate_indices` is `True`, this op validates the order and range of `set`
12510// indices.
12511//
12512// Arguments:
12513//	set_indices: 2D `Tensor`, indices of a `SparseTensor`.
12514//	set_values: 1D `Tensor`, values of a `SparseTensor`.
12515//	set_shape: 1D `Tensor`, shape of a `SparseTensor`.
12516//
12517// Returns For `set` ranked `n`, this is a `Tensor` with rank `n-1`, and the same 1st
12518// `n-1` dimensions as `set`. Each value is the number of unique elements in
12519// the corresponding `[0...n-1]` dimension of `set`.
12520func SetSize(scope *Scope, set_indices tf.Output, set_values tf.Output, set_shape tf.Output, optional ...SetSizeAttr) (size tf.Output) {
12521	if scope.Err() != nil {
12522		return
12523	}
12524	attrs := map[string]interface{}{}
12525	for _, a := range optional {
12526		a(attrs)
12527	}
12528	opspec := tf.OpSpec{
12529		Type: "SetSize",
12530		Input: []tf.Input{
12531			set_indices, set_values, set_shape,
12532		},
12533		Attrs: attrs,
12534	}
12535	op := scope.AddOperation(opspec)
12536	return op.Output(0)
12537}
12538
12539// Adds sparse `updates` to an existing tensor according to `indices`.
12540//
12541// This operation creates a new tensor by adding sparse `updates` to the passed
12542// in `tensor`.
12543// This operation is very similar to `tf.scatter_nd_add`, except that the updates
12544// are added onto an existing tensor (as opposed to a variable). If the memory
12545// for the existing tensor cannot be re-used, a copy is made and updated.
12546//
12547// `indices` is an integer tensor containing indices into a new tensor of shape
12548// `shape`.  The last dimension of `indices` can be at most the rank of `shape`:
12549//
12550//     indices.shape[-1] <= shape.rank
12551//
12552// The last dimension of `indices` corresponds to indices into elements
12553// (if `indices.shape[-1] = shape.rank`) or slices
12554// (if `indices.shape[-1] < shape.rank`) along dimension `indices.shape[-1]` of
12555// `shape`.  `updates` is a tensor with shape
12556//
12557//     indices.shape[:-1] + shape[indices.shape[-1]:]
12558//
12559// The simplest form of tensor_scatter_add is to add individual elements to a
12560// tensor by index. For example, say we want to add 4 elements in a rank-1
12561// tensor with 8 elements.
12562//
12563// In Python, this scatter add operation would look like this:
12564//
12565// ```python
12566//     indices = tf.constant([[4], [3], [1], [7]])
12567//     updates = tf.constant([9, 10, 11, 12])
12568//     tensor = tf.ones([8], dtype=tf.int32)
12569//     updated = tf.tensor_scatter_add(tensor, indices, updates)
12570//     with tf.Session() as sess:
//       print(sess.run(updated))
12572// ```
12573//
12574// The resulting tensor would look like this:
12575//
12576//     [1, 12, 1, 11, 10, 1, 1, 13]
12577//
// We can also insert entire slices of a higher rank tensor all at once. For
// example, we can insert two slices in the first dimension of a
// rank-3 tensor with two matrices of new values.
12581//
12582// In Python, this scatter add operation would look like this:
12583//
12584// ```python
12585//     indices = tf.constant([[0], [2]])
12586//     updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6],
12587//                             [7, 7, 7, 7], [8, 8, 8, 8]],
12588//                            [[5, 5, 5, 5], [6, 6, 6, 6],
12589//                             [7, 7, 7, 7], [8, 8, 8, 8]]])
12590//     tensor = tf.ones([4, 4, 4])
12591//     updated = tf.tensor_scatter_add(tensor, indices, updates)
12592//     with tf.Session() as sess:
//       print(sess.run(updated))
12594// ```
12595//
12596// The resulting tensor would look like this:
12597//
12598//     [[[6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8], [9, 9, 9, 9]],
12599//      [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]],
12600//      [[6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8], [9, 9, 9, 9]],
12601//      [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]]
12602//
12603// Note that on CPU, if an out of bound index is found, an error is returned.
12604// On GPU, if an out of bound index is found, the index is ignored.
12605//
12606// Arguments:
12607//	tensor: Tensor to copy/update.
12608//	indices: Index tensor.
12609//	updates: Updates to scatter into output.
12610//
12611// Returns A new tensor copied from tensor and updates added according to the indices.
12612func TensorScatterAdd(scope *Scope, tensor tf.Output, indices tf.Output, updates tf.Output) (output tf.Output) {
12613	if scope.Err() != nil {
12614		return
12615	}
12616	opspec := tf.OpSpec{
12617		Type: "TensorScatterAdd",
12618		Input: []tf.Input{
12619			tensor, indices, updates,
12620		},
12621	}
12622	op := scope.AddOperation(opspec)
12623	return op.Output(0)
12624}
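
// Editor's sketch (not part of the generated API): the Go analogue of the
// first Python example above; adds four elements into a rank-1 tensor of
// ones, yielding [1, 12, 1, 11, 10, 1, 1, 13].
func exampleTensorScatterAdd() (tf.Output, error) {
	s := NewScope()
	tensor := Const(s, []int32{1, 1, 1, 1, 1, 1, 1, 1})
	indices := Const(s, [][]int32{{4}, {3}, {1}, {7}})
	updates := Const(s, []int32{9, 10, 11, 12})
	return TensorScatterAdd(s, tensor, indices, updates), s.Err()
}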
12625
12626// Computes the sign and the log of the absolute value of the determinant of
12627//
12628// one or more square matrices.
12629//
12630// The input is a tensor of shape `[N, M, M]` whose inner-most 2 dimensions
12631// form square matrices. The outputs are two tensors containing the signs and
12632// absolute values of the log determinants for all N input submatrices
12633// `[..., :, :]` such that the determinant = sign*exp(log_abs_determinant).
12634// The log_abs_determinant is computed as det(P)*sum(log(diag(LU))) where LU
12635// is the LU decomposition of the input and P is the corresponding
12636// permutation matrix.
12637//
12638// Arguments:
12639//	input: Shape is `[N, M, M]`.
12640//
// Returns The signs of the log determinants of the inputs (shape `[N]`) and the
// logs of the absolute values of the determinants of the N input matrices
// (shape `[N]`).
12643func LogMatrixDeterminant(scope *Scope, input tf.Output) (sign tf.Output, log_abs_determinant tf.Output) {
12644	if scope.Err() != nil {
12645		return
12646	}
12647	opspec := tf.OpSpec{
12648		Type: "LogMatrixDeterminant",
12649		Input: []tf.Input{
12650			input,
12651		},
12652	}
12653	op := scope.AddOperation(opspec)
12654	return op.Output(0), op.Output(1)
12655}
12656
12657// Says whether the targets are in the top `K` predictions.
12658//
12659// This outputs a `batch_size` bool array, an entry `out[i]` is `true` if the
12660// prediction for the target class is among the top `k` predictions among
12661// all predictions for example `i`. Note that the behavior of `InTopK` differs
12662// from the `TopK` op in its handling of ties; if multiple classes have the
12663// same prediction value and straddle the top-`k` boundary, all of those
12664// classes are considered to be in the top `k`.
12665//
12666// More formally, let
12667//
12668//   \\(predictions_i\\) be the predictions for all classes for example `i`,
12669//   \\(targets_i\\) be the target class for example `i`,
12670//   \\(out_i\\) be the output for example `i`,
12671//
12672// $$out_i = predictions_{i, targets_i} \in TopKIncludingTies(predictions_i)$$
12673//
12674// Arguments:
12675//	predictions: A `batch_size` x `classes` tensor.
12676//	targets: A `batch_size` vector of class ids.
12677//	k: Number of top elements to look at for computing precision.
12678//
12679// Returns Computed precision at `k` as a `bool Tensor`.
12680func InTopKV2(scope *Scope, predictions tf.Output, targets tf.Output, k tf.Output) (precision tf.Output) {
12681	if scope.Err() != nil {
12682		return
12683	}
12684	opspec := tf.OpSpec{
12685		Type: "InTopKV2",
12686		Input: []tf.Input{
12687			predictions, targets, k,
12688		},
12689	}
12690	op := scope.AddOperation(opspec)
12691	return op.Output(0)
12692}
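
// Editor's sketch (not part of the generated API): top-k membership for a
// batch of two examples with k = 1; the expected output is [true, false],
// since only the first target is the argmax of its row.
func exampleInTopKV2() (tf.Output, error) {
	s := NewScope()
	predictions := Const(s, [][]float32{{0.1, 0.8, 0.1}, {0.3, 0.3, 0.4}})
	targets := Const(s, []int32{1, 0})
	k := Const(s, int32(1))
	return InTopKV2(s, predictions, targets, k), s.Err()
}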
12693
12694// Check if the input matches the regex pattern.
12695//
12696// The input is a string tensor of any shape. The pattern is a scalar
12697// string tensor which is applied to every element of the input tensor.
12698// The boolean values (True or False) of the output tensor indicate
12699// if the input matches the regex pattern provided.
12700//
12701// The pattern follows the re2 syntax (https://github.com/google/re2/wiki/Syntax)
12702//
12703// Arguments:
12704//	input: A string tensor of the text to be processed.
12705//	pattern: A scalar string tensor containing the regular expression to match the input.
12706//
12707// Returns A bool tensor with the same shape as `input`.
12708func RegexFullMatch(scope *Scope, input tf.Output, pattern tf.Output) (output tf.Output) {
12709	if scope.Err() != nil {
12710		return
12711	}
12712	opspec := tf.OpSpec{
12713		Type: "RegexFullMatch",
12714		Input: []tf.Input{
12715			input, pattern,
12716		},
12717	}
12718	op := scope.AddOperation(opspec)
12719	return op.Output(0)
12720}
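
// Editor's sketch (not part of the generated API): matches each element of a
// string tensor against an RE2 pattern; "abc123" matches in full while "abc"
// does not, so the expected output is [true, false].
func exampleRegexFullMatch() (tf.Output, error) {
	s := NewScope()
	input := Const(s, []string{"abc123", "abc"})
	pattern := Const(s, "[a-z]+[0-9]+") // RE2 syntax, matched against the full string
	return RegexFullMatch(s, input, pattern), s.Err()
}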
12721
12722// Converts a `RaggedTensor` into a `SparseTensor` with the same values.
12723//
12724// input=ragged.from_nested_row_splits(rt_dense_values, rt_nested_splits)
12725// output=SparseTensor(indices=sparse_indices, values=sparse_values,
12726//                     dense_shape=sparse_dense_shape)
12727//
12728// Arguments:
12729//	rt_nested_splits: The `row_splits` for the `RaggedTensor`.
12730//	rt_dense_values: The `flat_values` for the `RaggedTensor`.
12731//
// Returns The indices for the `SparseTensor`, the values of the `SparseTensor`,
// and `sparse_dense_shape`, a tight bounding box of the input `RaggedTensor`.
12733func RaggedTensorToSparse(scope *Scope, rt_nested_splits []tf.Output, rt_dense_values tf.Output) (sparse_indices tf.Output, sparse_values tf.Output, sparse_dense_shape tf.Output) {
12734	if scope.Err() != nil {
12735		return
12736	}
12737	opspec := tf.OpSpec{
12738		Type: "RaggedTensorToSparse",
12739		Input: []tf.Input{
12740			tf.OutputList(rt_nested_splits), rt_dense_values,
12741		},
12742	}
12743	op := scope.AddOperation(opspec)
12744	return op.Output(0), op.Output(1), op.Output(2)
12745}
12746
12747// FusedBatchNormGradV2Attr is an optional argument to FusedBatchNormGradV2.
12748type FusedBatchNormGradV2Attr func(optionalAttr)
12749
12750// FusedBatchNormGradV2Epsilon sets the optional epsilon attribute to value.
12751//
12752// value: A small float number added to the variance of x.
12753// If not specified, defaults to 0.0001
12754func FusedBatchNormGradV2Epsilon(value float32) FusedBatchNormGradV2Attr {
12755	return func(m optionalAttr) {
12756		m["epsilon"] = value
12757	}
12758}
12759
12760// FusedBatchNormGradV2DataFormat sets the optional data_format attribute to value.
12761//
12762// value: The data format for y_backprop, x, x_backprop.
12763// Either "NHWC" (default) or "NCHW".
12764// If not specified, defaults to "NHWC"
12765func FusedBatchNormGradV2DataFormat(value string) FusedBatchNormGradV2Attr {
12766	return func(m optionalAttr) {
12767		m["data_format"] = value
12768	}
12769}
12770
12771// FusedBatchNormGradV2IsTraining sets the optional is_training attribute to value.
12772//
12773// value: A bool value to indicate the operation is for training (default)
12774// or inference.
12775// If not specified, defaults to true
12776func FusedBatchNormGradV2IsTraining(value bool) FusedBatchNormGradV2Attr {
12777	return func(m optionalAttr) {
12778		m["is_training"] = value
12779	}
12780}
12781
12782// Gradient for batch normalization.
12783//
// Note that the size of 4D Tensors is defined by either "NHWC" or "NCHW".
12785// The size of 1D Tensors matches the dimension C of the 4D Tensors.
12786//
12787// Arguments:
12788//	y_backprop: A 4D Tensor for the gradient with respect to y.
12789//	x: A 4D Tensor for input data.
12790//	scale: A 1D Tensor for scaling factor, to scale the normalized x.
12791//	reserve_space_1: When is_training is True, a 1D Tensor for the computed batch
12792// mean to be reused in gradient computation. When is_training is
12793// False, a 1D Tensor for the population mean to be reused in both
12794// 1st and 2nd order gradient computation.
12795//	reserve_space_2: When is_training is True, a 1D Tensor for the computed batch
12796// variance (inverted variance in the cuDNN case) to be reused in
12797// gradient computation. When is_training is False, a 1D Tensor
12798// for the population variance to be reused in both 1st and 2nd
12799// order gradient computation.
12800//
// Returns A 4D Tensor for the gradient with respect to x, a 1D Tensor for the
// gradient with respect to scale, a 1D Tensor for the gradient with respect to
// offset, an unused placeholder to match the mean input in FusedBatchNorm, and
// an unused placeholder to match the variance input in FusedBatchNorm.
12803func FusedBatchNormGradV2(scope *Scope, y_backprop tf.Output, x tf.Output, scale tf.Output, reserve_space_1 tf.Output, reserve_space_2 tf.Output, optional ...FusedBatchNormGradV2Attr) (x_backprop tf.Output, scale_backprop tf.Output, offset_backprop tf.Output, reserve_space_3 tf.Output, reserve_space_4 tf.Output) {
12804	if scope.Err() != nil {
12805		return
12806	}
12807	attrs := map[string]interface{}{}
12808	for _, a := range optional {
12809		a(attrs)
12810	}
12811	opspec := tf.OpSpec{
12812		Type: "FusedBatchNormGradV2",
12813		Input: []tf.Input{
12814			y_backprop, x, scale, reserve_space_1, reserve_space_2,
12815		},
12816		Attrs: attrs,
12817	}
12818	op := scope.AddOperation(opspec)
12819	return op.Output(0), op.Output(1), op.Output(2), op.Output(3), op.Output(4)
12820}
12821
12822// Component-wise multiplies a SparseTensor by a dense Tensor.
12823//
12824// The output locations corresponding to the implicitly zero elements in the sparse
12825// tensor will be zero (i.e., will not take up storage space), regardless of the
// contents of the dense tensor (even if it is +/-Inf, and despite the fact that Inf*0 == NaN).
12827//
12828// *Limitation*: this Op only broadcasts the dense side to the sparse side, but not
12829// the other direction.
12830//
12831// Arguments:
12832//	sp_indices: 2-D.  `N x R` matrix with the indices of non-empty values in a
12833// SparseTensor, possibly not in canonical ordering.
12834//	sp_values: 1-D.  `N` non-empty values corresponding to `sp_indices`.
12835//	sp_shape: 1-D.  Shape of the input SparseTensor.
12836//	dense: `R`-D.  The dense Tensor operand.
12837//
12838// Returns 1-D.  The `N` values that are operated on.
12839func SparseDenseCwiseMul(scope *Scope, sp_indices tf.Output, sp_values tf.Output, sp_shape tf.Output, dense tf.Output) (output tf.Output) {
12840	if scope.Err() != nil {
12841		return
12842	}
12843	opspec := tf.OpSpec{
12844		Type: "SparseDenseCwiseMul",
12845		Input: []tf.Input{
12846			sp_indices, sp_values, sp_shape, dense,
12847		},
12848	}
12849	op := scope.AddOperation(opspec)
12850	return op.Output(0)
12851}
12852
12853// MaxPool3DGradAttr is an optional argument to MaxPool3DGrad.
12854type MaxPool3DGradAttr func(optionalAttr)
12855
12856// MaxPool3DGradDataFormat sets the optional data_format attribute to value.
12857//
12858// value: The data format of the input and output data. With the
12859// default format "NDHWC", the data is stored in the order of:
12860//     [batch, in_depth, in_height, in_width, in_channels].
// Alternatively, the format could be "NCDHW"; the data storage order is:
12862//     [batch, in_channels, in_depth, in_height, in_width].
12863// If not specified, defaults to "NDHWC"
12864func MaxPool3DGradDataFormat(value string) MaxPool3DGradAttr {
12865	return func(m optionalAttr) {
12866		m["data_format"] = value
12867	}
12868}
12869
12870// Computes gradients of max pooling function.
12871//
12872// Arguments:
12873//	orig_input: The original input tensor.
12874//	orig_output: The original output tensor.
12875//	grad: Output backprop of shape `[batch, depth, rows, cols, channels]`.
12876//	ksize: 1-D tensor of length 5. The size of the window for each dimension of
12877// the input tensor. Must have `ksize[0] = ksize[4] = 1`.
12878//	strides: 1-D tensor of length 5. The stride of the sliding window for each
12879// dimension of `input`. Must have `strides[0] = strides[4] = 1`.
12880//	padding: The type of padding algorithm to use.
12881func MaxPool3DGrad(scope *Scope, orig_input tf.Output, orig_output tf.Output, grad tf.Output, ksize []int64, strides []int64, padding string, optional ...MaxPool3DGradAttr) (output tf.Output) {
12882	if scope.Err() != nil {
12883		return
12884	}
12885	attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
12886	for _, a := range optional {
12887		a(attrs)
12888	}
12889	opspec := tf.OpSpec{
12890		Type: "MaxPool3DGrad",
12891		Input: []tf.Input{
12892			orig_input, orig_output, grad,
12893		},
12894		Attrs: attrs,
12895	}
12896	op := scope.AddOperation(opspec)
12897	return op.Output(0)
12898}
12899
12900// Returns the name of the device on which `resource` has been placed.
12901func ExperimentalIteratorGetDevice(scope *Scope, resource tf.Output) (device tf.Output) {
12902	if scope.Err() != nil {
12903		return
12904	}
12905	opspec := tf.OpSpec{
12906		Type: "ExperimentalIteratorGetDevice",
12907		Input: []tf.Input{
12908			resource,
12909		},
12910	}
12911	op := scope.AddOperation(opspec)
12912	return op.Output(0)
12913}
12914
12915// SparseReduceSumAttr is an optional argument to SparseReduceSum.
12916type SparseReduceSumAttr func(optionalAttr)
12917
12918// SparseReduceSumKeepDims sets the optional keep_dims attribute to value.
12919//
12920// value: If true, retain reduced dimensions with length 1.
12921// If not specified, defaults to false
12922func SparseReduceSumKeepDims(value bool) SparseReduceSumAttr {
12923	return func(m optionalAttr) {
12924		m["keep_dims"] = value
12925	}
12926}
12927
12928// Computes the sum of elements across dimensions of a SparseTensor.
12929//
12930// This Op takes a SparseTensor and is the sparse counterpart to
12931// `tf.reduce_sum()`.  In particular, this Op also returns a dense `Tensor`
12932// instead of a sparse one.
12933//
12934// Reduces `sp_input` along the dimensions given in `reduction_axes`.  Unless
12935// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
12936// `reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained
12937// with length 1.
12938//
12939// If `reduction_axes` has no entries, all dimensions are reduced, and a tensor
12940// with a single element is returned.  Additionally, the axes can be negative,
// in which case they are interpreted according to the indexing rules in Python.
12942//
12943// Arguments:
12944//	input_indices: 2-D.  `N x R` matrix with the indices of non-empty values in a
12945// SparseTensor, possibly not in canonical ordering.
12946//	input_values: 1-D.  `N` non-empty values corresponding to `input_indices`.
12947//	input_shape: 1-D.  Shape of the input SparseTensor.
12948//	reduction_axes: 1-D.  Length-`K` vector containing the reduction axes.
12949//
12950// Returns `R-K`-D.  The reduced Tensor.
12951func SparseReduceSum(scope *Scope, input_indices tf.Output, input_values tf.Output, input_shape tf.Output, reduction_axes tf.Output, optional ...SparseReduceSumAttr) (output tf.Output) {
12952	if scope.Err() != nil {
12953		return
12954	}
12955	attrs := map[string]interface{}{}
12956	for _, a := range optional {
12957		a(attrs)
12958	}
12959	opspec := tf.OpSpec{
12960		Type: "SparseReduceSum",
12961		Input: []tf.Input{
12962			input_indices, input_values, input_shape, reduction_axes,
12963		},
12964		Attrs: attrs,
12965	}
12966	op := scope.AddOperation(opspec)
12967	return op.Output(0)
12968}
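
// Editor's sketch (not part of the generated API): sums a 2x3 SparseTensor
// with entries {(0,0): 1, (1,1): 2, (1,2): 3} along axis 1, producing the
// dense vector [1, 5].
func exampleSparseReduceSum() (tf.Output, error) {
	s := NewScope()
	indices := Const(s, [][]int64{{0, 0}, {1, 1}, {1, 2}})
	values := Const(s, []int32{1, 2, 3})
	shape := Const(s, []int64{2, 3})
	axes := Const(s, []int32{1})
	return SparseReduceSum(s, indices, values, shape, axes), s.Err()
}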
12969
12970// Records the latency of producing `input_dataset` elements in a StatsAggregator.
12971func ExperimentalLatencyStatsDataset(scope *Scope, input_dataset tf.Output, tag tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
12972	if scope.Err() != nil {
12973		return
12974	}
12975	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
12976	opspec := tf.OpSpec{
12977		Type: "ExperimentalLatencyStatsDataset",
12978		Input: []tf.Input{
12979			input_dataset, tag,
12980		},
12981		Attrs: attrs,
12982	}
12983	op := scope.AddOperation(opspec)
12984	return op.Output(0)
12985}
12986
12987// Adds up a `SparseTensor` and a dense `Tensor`, producing a dense `Tensor`.
12988//
12989// This Op does not require `a_indices` be sorted in standard lexicographic order.
12990//
12991// Arguments:
12992//	a_indices: 2-D.  The `indices` of the `SparseTensor`, with shape `[nnz, ndims]`.
12993//	a_values: 1-D.  The `values` of the `SparseTensor`, with shape `[nnz]`.
12994//	a_shape: 1-D.  The `shape` of the `SparseTensor`, with shape `[ndims]`.
12995//	b: `ndims`-D Tensor.  With shape `a_shape`.
12996func SparseTensorDenseAdd(scope *Scope, a_indices tf.Output, a_values tf.Output, a_shape tf.Output, b tf.Output) (output tf.Output) {
12997	if scope.Err() != nil {
12998		return
12999	}
13000	opspec := tf.OpSpec{
13001		Type: "SparseTensorDenseAdd",
13002		Input: []tf.Input{
13003			a_indices, a_values, a_shape, b,
13004		},
13005	}
13006	op := scope.AddOperation(opspec)
13007	return op.Output(0)
13008}
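
// Editor's sketch (not part of the generated API): adds the 2x2 sparse tensor
// {(0,0): 5, (1,1): 6} to a dense 2x2 tensor of ones, producing [[6, 1], [1, 7]].
func exampleSparseTensorDenseAdd() (tf.Output, error) {
	s := NewScope()
	aIndices := Const(s, [][]int64{{0, 0}, {1, 1}})
	aValues := Const(s, []int32{5, 6})
	aShape := Const(s, []int64{2, 2})
	b := Const(s, [][]int32{{1, 1}, {1, 1}})
	return SparseTensorDenseAdd(s, aIndices, aValues, aShape, b), s.Err()
}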
13009
13010// Split a `SparseTensor` into `num_split` tensors along one dimension.
13011//
// If `shape[split_dim]` is not an integer multiple of `num_split`, slices
// `[0 : shape[split_dim] % num_split]` get one extra dimension.
13014// For example, if `split_dim = 1` and `num_split = 2` and the input is
13015//
13016//     input_tensor = shape = [2, 7]
13017//     [    a   d e  ]
13018//     [b c          ]
13019//
13020// Graphically the output tensors are:
13021//
13022//     output_tensor[0] = shape = [2, 4]
13023//     [    a  ]
13024//     [b c    ]
13025//
13026//     output_tensor[1] = shape = [2, 3]
13027//     [ d e  ]
13028//     [      ]
13029//
13030// Arguments:
13031//	split_dim: 0-D.  The dimension along which to split.  Must be in the range
13032// `[0, rank(shape))`.
//	indices: 2-D tensor representing the indices of the sparse tensor.
//	values: 1-D tensor representing the values of the sparse tensor.
//	shape: 1-D tensor representing the shape of the sparse tensor.
//	num_split: The number of ways to split.
//
// Returns a list of 1-D tensors representing the indices of the output sparse
// tensors, a list of 1-D tensors representing the values of the output sparse
// tensors, and a list of 1-D tensors representing the shape of the output
// sparse tensors.
13043func SparseSplit(scope *Scope, split_dim tf.Output, indices tf.Output, values tf.Output, shape tf.Output, num_split int64) (output_indices []tf.Output, output_values []tf.Output, output_shape []tf.Output) {
13044	if scope.Err() != nil {
13045		return
13046	}
13047	attrs := map[string]interface{}{"num_split": num_split}
13048	opspec := tf.OpSpec{
13049		Type: "SparseSplit",
13050		Input: []tf.Input{
13051			split_dim, indices, values, shape,
13052		},
13053		Attrs: attrs,
13054	}
13055	op := scope.AddOperation(opspec)
13056	if scope.Err() != nil {
13057		return
13058	}
13059	var idx int
13060	var err error
13061	if output_indices, idx, err = makeOutputList(op, idx, "output_indices"); err != nil {
13062		scope.UpdateErr("SparseSplit", err)
13063		return
13064	}
13065	if output_values, idx, err = makeOutputList(op, idx, "output_values"); err != nil {
13066		scope.UpdateErr("SparseSplit", err)
13067		return
13068	}
13069	if output_shape, idx, err = makeOutputList(op, idx, "output_shape"); err != nil {
13070		scope.UpdateErr("SparseSplit", err)
13071		return
13072	}
13073	return output_indices, output_values, output_shape
13074}
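
// Editor's sketch (not part of the generated API): the Go analogue of the
// example above, splitting the [2, 7] sparse tensor along dimension 1 into
// two sparse tensors of shapes [2, 4] and [2, 3].
func exampleSparseSplit() (indices, values, shapes []tf.Output, err error) {
	s := NewScope()
	splitDim := Const(s, int64(1))
	inIndices := Const(s, [][]int64{{0, 2}, {0, 4}, {0, 5}, {1, 0}, {1, 1}})
	inValues := Const(s, []string{"a", "d", "e", "b", "c"})
	inShape := Const(s, []int64{2, 7})
	indices, values, shapes = SparseSplit(s, splitDim, inIndices, inValues, inShape, 2)
	return indices, values, shapes, s.Err()
}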
13075
13076// Applies sparse addition to `input` using individual values or slices
13077//
13078// from `updates` according to indices `indices`.  The updates are non-aliasing:
13079// `input` is only modified in-place if no other operations will use it.
13080// Otherwise, a copy of `input` is made.  This operation has a gradient with
13081// respect to both `input` and `updates`.
13082//
13083// `input` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
13084//
// `indices` must be an integer tensor, containing indices into `input`.
13086// It must be shape \\([d_0, ..., d_{Q-2}, K]\\) where `0 < K <= P`.
13087//
13088// The innermost dimension of `indices` (with length `K`) corresponds to
13089// indices into elements (if `K = P`) or `(P-K)`-dimensional slices
13090// (if `K < P`) along the `K`th dimension of `input`.
13091//
// `updates` is a `Tensor` of rank `Q-1+P-K` with shape:
13093//
13094// $$[d_0, ..., d_{Q-2}, input.shape[K], ..., input.shape[P-1]].$$
13095//
// For example, say we want to add 4 scattered elements to a rank-1 tensor with 8
// elements. In Python, that addition would look like this:
13098//
13099//     input = tf.constant([1, 2, 3, 4, 5, 6, 7, 8])
13100//     indices = tf.constant([[4], [3], [1], [7]])
13101//     updates = tf.constant([9, 10, 11, 12])
13102//     output = tf.scatter_nd_non_aliasing_add(input, indices, updates)
13103//     with tf.Session() as sess:
13104//       print(sess.run(output))
13105//
13106// The resulting value `output` would look like this:
13107//
13108//     [1, 13, 3, 14, 14, 6, 7, 20]
13109//
13110// See `tf.scatter_nd` for more details about how to make updates to slices.
13111//
13112// Arguments:
13113//	input: A Tensor.
13114//	indices: A Tensor. Must be one of the following types: `int32`, `int64`.
13115// A tensor of indices into `input`.
13116//	updates: A Tensor. Must have the same type as ref. A tensor of updated values
13117// to add to `input`.
13118//
13119// Returns A `Tensor` with the same shape as `input`, containing values of `input`
13120// updated with `updates`.
13121func ScatterNdNonAliasingAdd(scope *Scope, input tf.Output, indices tf.Output, updates tf.Output) (output tf.Output) {
13122	if scope.Err() != nil {
13123		return
13124	}
13125	opspec := tf.OpSpec{
13126		Type: "ScatterNdNonAliasingAdd",
13127		Input: []tf.Input{
13128			input, indices, updates,
13129		},
13130	}
13131	op := scope.AddOperation(opspec)
13132	return op.Output(0)
13133}
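
// Editor's sketch (not part of the generated API): the Go analogue of the
// Python example above; the expected result is [1, 13, 3, 14, 14, 6, 7, 20].
func exampleScatterNdNonAliasingAdd() (tf.Output, error) {
	s := NewScope()
	input := Const(s, []int32{1, 2, 3, 4, 5, 6, 7, 8})
	indices := Const(s, [][]int32{{4}, {3}, {1}, {7}})
	updates := Const(s, []int32{9, 10, 11, 12})
	return ScatterNdNonAliasingAdd(s, input, indices, updates), s.Err()
}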
13134
13135// Creates a MultiDeviceIterator resource.
13136//
13137// Arguments:
13138//	devices: A list of devices the iterator works across.
13139//	shared_name: If non-empty, this resource will be shared under the given name
13140// across multiple sessions.
13141//	container: If non-empty, this resource is placed in the given container.
13142// Otherwise, a default container is used.
13143//	output_types: The type list for the return values.
13144//	output_shapes: The list of shapes being produced.
13145//
13146// Returns Handle to the resource created.
13147func MultiDeviceIterator(scope *Scope, devices []string, shared_name string, container string, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
13148	if scope.Err() != nil {
13149		return
13150	}
13151	attrs := map[string]interface{}{"devices": devices, "shared_name": shared_name, "container": container, "output_types": output_types, "output_shapes": output_shapes}
13152	opspec := tf.OpSpec{
13153		Type: "MultiDeviceIterator",
13154
13155		Attrs: attrs,
13156	}
13157	op := scope.AddOperation(opspec)
13158	return op.Output(0)
13159}
13160
13161// FractionalMaxPoolAttr is an optional argument to FractionalMaxPool.
13162type FractionalMaxPoolAttr func(optionalAttr)
13163
13164// FractionalMaxPoolPseudoRandom sets the optional pseudo_random attribute to value.
13165//
13166// value: When set to True, generates the pooling sequence in a
13167// pseudorandom fashion, otherwise, in a random fashion. Check paper [Benjamin
13168// Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071) for
13169// difference between pseudorandom and random.
13170// If not specified, defaults to false
13171func FractionalMaxPoolPseudoRandom(value bool) FractionalMaxPoolAttr {
13172	return func(m optionalAttr) {
13173		m["pseudo_random"] = value
13174	}
13175}
13176
13177// FractionalMaxPoolOverlapping sets the optional overlapping attribute to value.
13178//
13179// value: When set to True, it means when pooling, the values at the boundary
13180// of adjacent pooling cells are used by both cells. For example:
13181//
13182// `index  0  1  2  3  4`
13183//
13184// `value  20 5  16 3  7`
13185//
// If the pooling sequence is [0, 2, 4], then 16, at index 2, will be used twice.
13187// The result would be [20, 16] for fractional max pooling.
13188// If not specified, defaults to false
13189func FractionalMaxPoolOverlapping(value bool) FractionalMaxPoolAttr {
13190	return func(m optionalAttr) {
13191		m["overlapping"] = value
13192	}
13193}
13194
13195// FractionalMaxPoolDeterministic sets the optional deterministic attribute to value.
13196//
13197// value: When set to True, a fixed pooling region will be used when
13198// iterating over a FractionalMaxPool node in the computation graph. Mainly used
13199// in unit test to make FractionalMaxPool deterministic.
13200// If not specified, defaults to false
13201func FractionalMaxPoolDeterministic(value bool) FractionalMaxPoolAttr {
13202	return func(m optionalAttr) {
13203		m["deterministic"] = value
13204	}
13205}
13206
13207// FractionalMaxPoolSeed sets the optional seed attribute to value.
13208//
13209// value: If either seed or seed2 are set to be non-zero, the random number
13210// generator is seeded by the given seed.  Otherwise, it is seeded by a
13211// random seed.
13212// If not specified, defaults to 0
13213func FractionalMaxPoolSeed(value int64) FractionalMaxPoolAttr {
13214	return func(m optionalAttr) {
13215		m["seed"] = value
13216	}
13217}
13218
13219// FractionalMaxPoolSeed2 sets the optional seed2 attribute to value.
13220//
// value: A second seed to avoid seed collision.
13222// If not specified, defaults to 0
13223func FractionalMaxPoolSeed2(value int64) FractionalMaxPoolAttr {
13224	return func(m optionalAttr) {
13225		m["seed2"] = value
13226	}
13227}
13228
13229// Performs fractional max pooling on the input.
13230//
// Fractional max pooling is slightly different from regular max pooling.  In
13232// regular max pooling, you downsize an input set by taking the maximum value of
13233// smaller N x N subsections of the set (often 2x2), and try to reduce the set by
13234// a factor of N, where N is an integer.  Fractional max pooling, as you might
13235// expect from the word "fractional", means that the overall reduction ratio N
13236// does not have to be an integer.
13237//
13238// The sizes of the pooling regions are generated randomly but are fairly uniform.
13239// For example, let's look at the height dimension, and the constraints on the
13240// list of rows that will be pool boundaries.
13241//
13242// First we define the following:
13243//
13244// 1.  input_row_length : the number of rows from the input set
13245// 2.  output_row_length : which will be smaller than the input
13246// 3.  alpha = input_row_length / output_row_length : our reduction ratio
13247// 4.  K = floor(alpha)
13248// 5.  row_pooling_sequence : this is the result list of pool boundary rows
13249//
13250// Then, row_pooling_sequence should satisfy:
13251//
13252// 1.  a[0] = 0 : the first value of the sequence is 0
13253// 2.  a[end] = input_row_length : the last value of the sequence is the size
13254// 3.  K <= (a[i+1] - a[i]) <= K+1 : all intervals are K or K+1 size
13255// 4.  length(row_pooling_sequence) = output_row_length+1
13256//
13257// For more details on fractional max pooling, see this paper:
13258// [Benjamin Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071)
13259//
13260// Arguments:
13261//	value: 4-D with shape `[batch, height, width, channels]`.
13262//	pooling_ratio: Pooling ratio for each dimension of `value`, currently only
13263// supports row and col dimension and should be >= 1.0. For example, a valid
13264// pooling ratio looks like [1.0, 1.44, 1.73, 1.0]. The first and last elements
13265// must be 1.0 because we don't allow pooling on batch and channels
13266// dimensions. 1.44 and 1.73 are pooling ratio on height and width dimensions
13267// respectively.
13268//
// Returns the output tensor after fractional max pooling, the row pooling
// sequence (needed to calculate the gradient), and the column pooling sequence
// (needed to calculate the gradient).
13270func FractionalMaxPool(scope *Scope, value tf.Output, pooling_ratio []float32, optional ...FractionalMaxPoolAttr) (output tf.Output, row_pooling_sequence tf.Output, col_pooling_sequence tf.Output) {
13271	if scope.Err() != nil {
13272		return
13273	}
13274	attrs := map[string]interface{}{"pooling_ratio": pooling_ratio}
13275	for _, a := range optional {
13276		a(attrs)
13277	}
13278	opspec := tf.OpSpec{
13279		Type: "FractionalMaxPool",
13280		Input: []tf.Input{
13281			value,
13282		},
13283		Attrs: attrs,
13284	}
13285	op := scope.AddOperation(opspec)
13286	return op.Output(0), op.Output(1), op.Output(2)
13287}
13288
13289// Generates sparse cross from a list of sparse and dense tensors.
13290//
13291// The op takes two lists, one of 2D `SparseTensor` and one of 2D `Tensor`, each
13292// representing features of one feature column. It outputs a 2D `SparseTensor` with
13293// the batchwise crosses of these features.
13294//
13295// For example, if the inputs are
13296//
13297//     inputs[0]: SparseTensor with shape = [2, 2]
13298//     [0, 0]: "a"
13299//     [1, 0]: "b"
13300//     [1, 1]: "c"
13301//
13302//     inputs[1]: SparseTensor with shape = [2, 1]
13303//     [0, 0]: "d"
13304//     [1, 0]: "e"
13305//
13306//     inputs[2]: Tensor [["f"], ["g"]]
13307//
13308// then the output will be
13309//
13310//     shape = [2, 2]
13311//     [0, 0]: "a_X_d_X_f"
13312//     [1, 0]: "b_X_e_X_g"
13313//     [1, 1]: "c_X_e_X_g"
13314//
13315// if hashed_output=true then the output will be
13316//
13317//     shape = [2, 2]
13318//     [0, 0]: FingerprintCat64(
13319//                 Fingerprint64("f"), FingerprintCat64(
13320//                     Fingerprint64("d"), Fingerprint64("a")))
13321//     [1, 0]: FingerprintCat64(
13322//                 Fingerprint64("g"), FingerprintCat64(
13323//                     Fingerprint64("e"), Fingerprint64("b")))
13324//     [1, 1]: FingerprintCat64(
13325//                 Fingerprint64("g"), FingerprintCat64(
13326//                     Fingerprint64("e"), Fingerprint64("c")))
13327//
13328// Arguments:
13329//	indices: 2-D.  Indices of each input `SparseTensor`.
13330//	values: 1-D.   values of each `SparseTensor`.
13331//	shapes: 1-D.   Shapes of each `SparseTensor`.
13332//	dense_inputs: 2-D.    Columns represented by dense `Tensor`.
13333//	hashed_output: If true, returns the hash of the cross instead of the string.
// This allows us to avoid string manipulations.
13335//	num_buckets: It is used if hashed_output is true.
13336// output = hashed_value%num_buckets if num_buckets > 0 else hashed_value.
13337//	hash_key: Specify the hash_key that will be used by the `FingerprintCat64`
// function to combine the crosses' fingerprints.
13339//
13340//
13341//
// Returns 2-D indices of the concatenated `SparseTensor`, 1-D non-empty values
// of the concatenated or hashed `SparseTensor`, and the 1-D shape of the
// concatenated `SparseTensor`.
13344func SparseCross(scope *Scope, indices []tf.Output, values []tf.Output, shapes []tf.Output, dense_inputs []tf.Output, hashed_output bool, num_buckets int64, hash_key int64, out_type tf.DataType, internal_type tf.DataType) (output_indices tf.Output, output_values tf.Output, output_shape tf.Output) {
13345	if scope.Err() != nil {
13346		return
13347	}
13348	attrs := map[string]interface{}{"hashed_output": hashed_output, "num_buckets": num_buckets, "hash_key": hash_key, "out_type": out_type, "internal_type": internal_type}
13349	opspec := tf.OpSpec{
13350		Type: "SparseCross",
13351		Input: []tf.Input{
13352			tf.OutputList(indices), tf.OutputList(values), tf.OutputList(shapes), tf.OutputList(dense_inputs),
13353		},
13354		Attrs: attrs,
13355	}
13356	op := scope.AddOperation(opspec)
13357	return op.Output(0), op.Output(1), op.Output(2)
13358}
13359
13360// Inverse real-valued fast Fourier transform.
13361//
13362// Computes the inverse 1-dimensional discrete Fourier transform of a real-valued
13363// signal over the inner-most dimension of `input`.
13364//
13365// The inner-most dimension of `input` is assumed to be the result of `RFFT`: the
13366// `fft_length / 2 + 1` unique components of the DFT of a real-valued signal. If
13367// `fft_length` is not provided, it is computed from the size of the inner-most
13368// dimension of `input` (`fft_length = 2 * (inner - 1)`). If the FFT length used to
13369// compute `input` is odd, it should be provided since it cannot be inferred
13370// properly.
13371//
13372// Along the axis `IRFFT` is computed on, if `fft_length / 2 + 1` is smaller
13373// than the corresponding dimension of `input`, the dimension is cropped. If it is
13374// larger, the dimension is padded with zeros.
13375//
13376// Arguments:
13377//	input: A complex64 tensor.
13378//	fft_length: An int32 tensor of shape [1]. The FFT length.
13379//
13380// Returns A float32 tensor of the same rank as `input`. The inner-most
13381//   dimension of `input` is replaced with the `fft_length` samples of its inverse
13382//   1D Fourier transform.
13383//
13384// @compatibility(numpy)
13385// Equivalent to np.fft.irfft
13386// @end_compatibility
13387func IRFFT(scope *Scope, input tf.Output, fft_length tf.Output) (output tf.Output) {
13388	if scope.Err() != nil {
13389		return
13390	}
13391	opspec := tf.OpSpec{
13392		Type: "IRFFT",
13393		Input: []tf.Input{
13394			input, fft_length,
13395		},
13396	}
13397	op := scope.AddOperation(opspec)
13398	return op.Output(0)
13399}
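
// Editor's sketch (not part of the generated API): an RFFT/IRFFT round trip
// over a length-4 real signal, using the RFFT wrapper from this package.
// fft_length is passed explicitly; for an even-length signal it could also be
// inferred as 2 * (inner - 1).
func exampleIRFFT() (tf.Output, error) {
	s := NewScope()
	signal := Const(s, []float32{1, 2, 3, 4})
	fftLength := Const(s, []int32{4})
	spectrum := RFFT(s, signal, fftLength) // complex64 with 4/2 + 1 = 3 components
	return IRFFT(s, spectrum, fftLength), s.Err()
}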
13400
13401// Concatenates a list of `SparseTensor` along the specified dimension.
13402//
13403// Concatenation is with respect to the dense versions of these sparse tensors.
13404// It is assumed that each input is a `SparseTensor` whose elements are ordered
13405// along increasing dimension number.
13406//
13407// All inputs' shapes must match, except for the concat dimension.  The
13408// `indices`, `values`, and `shapes` lists must have the same length.
13409//
13410// The output shape is identical to the inputs', except along the concat
13411// dimension, where it is the sum of the inputs' sizes along that dimension.
13412//
13413// The output elements will be resorted to preserve the sort order along
13414// increasing dimension number.
13415//
13416// This op runs in `O(M log M)` time, where `M` is the total number of non-empty
13417// values across all inputs. This is due to the need for an internal sort in
13418// order to concatenate efficiently across an arbitrary dimension.
13419//
13420// For example, if `concat_dim = 1` and the inputs are
13421//
13422//     sp_inputs[0]: shape = [2, 3]
13423//     [0, 2]: "a"
13424//     [1, 0]: "b"
13425//     [1, 1]: "c"
13426//
13427//     sp_inputs[1]: shape = [2, 4]
13428//     [0, 1]: "d"
13429//     [0, 2]: "e"
13430//
13431// then the output will be
13432//
13433//     shape = [2, 7]
13434//     [0, 2]: "a"
13435//     [0, 4]: "d"
13436//     [0, 5]: "e"
13437//     [1, 0]: "b"
13438//     [1, 1]: "c"
13439//
13440// Graphically this is equivalent to doing
13441//
13442//     [    a] concat [  d e  ] = [    a   d e  ]
13443//     [b c  ]        [       ]   [b c          ]
13444//
13445// Arguments:
13446//	indices: 2-D.  Indices of each input `SparseTensor`.
13447//	values: 1-D.  Non-empty values of each `SparseTensor`.
13448//	shapes: 1-D.  Shapes of each `SparseTensor`.
13449//	concat_dim: Dimension to concatenate along. Must be in range [-rank, rank),
13450// where rank is the number of dimensions in each input `SparseTensor`.
13451//
// Returns 2-D indices of the concatenated `SparseTensor`, 1-D non-empty values
// of the concatenated `SparseTensor`, and the 1-D shape of the concatenated
// `SparseTensor`.
13453func SparseConcat(scope *Scope, indices []tf.Output, values []tf.Output, shapes []tf.Output, concat_dim int64) (output_indices tf.Output, output_values tf.Output, output_shape tf.Output) {
13454	if scope.Err() != nil {
13455		return
13456	}
13457	attrs := map[string]interface{}{"concat_dim": concat_dim}
13458	opspec := tf.OpSpec{
13459		Type: "SparseConcat",
13460		Input: []tf.Input{
13461			tf.OutputList(indices), tf.OutputList(values), tf.OutputList(shapes),
13462		},
13463		Attrs: attrs,
13464	}
13465	op := scope.AddOperation(opspec)
13466	return op.Output(0), op.Output(1), op.Output(2)
13467}
13468
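// Example (editorial sketch, not machine generated): building the two sparse
// inputs from the comment above with op.Const and concatenating them along
// dimension 1; all literal values are assumptions for illustration.
//
//	s := op.NewScope()
//	i0 := op.Const(s, [][]int64{{0, 2}, {1, 0}, {1, 1}})
//	v0 := op.Const(s, []string{"a", "b", "c"})
//	sh0 := op.Const(s, []int64{2, 3})
//	i1 := op.Const(s, [][]int64{{0, 1}, {0, 2}})
//	v1 := op.Const(s, []string{"d", "e"})
//	sh1 := op.Const(s, []int64{2, 4})
//	oi, ov, osh := op.SparseConcat(s, []tf.Output{i0, i1},
//		[]tf.Output{v0, v1}, []tf.Output{sh0, sh1}, 1) // shape becomes [2, 7]
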
13469// Deserialize and concatenate `SparseTensors` from a serialized minibatch.
13470//
13471// The input `serialized_sparse` must be a string matrix of shape `[N x 3]` where
13472// `N` is the minibatch size and the rows correspond to packed outputs of
13473// `SerializeSparse`.  The ranks of the original `SparseTensor` objects
13474// must all match.  When the final `SparseTensor` is created, it has rank one
13475// higher than the ranks of the incoming `SparseTensor` objects
13476// (they have been concatenated along a new row dimension).
13477//
13478// The output `SparseTensor` object's shape values for all dimensions but the
13479// first are the max across the input `SparseTensor` objects' shape values
13480// for the corresponding dimensions.  Its first shape value is `N`, the minibatch
13481// size.
13482//
13483// The input `SparseTensor` objects' indices are assumed ordered in
13484// standard lexicographic order.  If this is not the case, after this
13485// step run `SparseReorder` to restore index ordering.
13486//
13487// For example, if the serialized input is a `[2 x 3]` matrix representing two
13488// original `SparseTensor` objects:
13489//
13490//     index = [ 0]
13491//             [10]
13492//             [20]
13493//     values = [1, 2, 3]
13494//     shape = [50]
13495//
13496// and
13497//
13498//     index = [ 2]
13499//             [10]
13500//     values = [4, 5]
13501//     shape = [30]
13502//
13503// then the final deserialized `SparseTensor` will be:
13504//
13505//     index = [0  0]
13506//             [0 10]
13507//             [0 20]
13508//             [1  2]
13509//             [1 10]
13510//     values = [1, 2, 3, 4, 5]
13511//     shape = [2 50]
13512//
13513// Arguments:
13514//	serialized_sparse: 2-D.  The `N` serialized `SparseTensor` objects.
13515// Must have 3 columns.
13516//	dtype: The `dtype` of the serialized `SparseTensor` objects.
13517func DeserializeManySparse(scope *Scope, serialized_sparse tf.Output, dtype tf.DataType) (sparse_indices tf.Output, sparse_values tf.Output, sparse_shape tf.Output) {
13518	if scope.Err() != nil {
13519		return
13520	}
13521	attrs := map[string]interface{}{"dtype": dtype}
13522	opspec := tf.OpSpec{
13523		Type: "DeserializeManySparse",
13524		Input: []tf.Input{
13525			serialized_sparse,
13526		},
13527		Attrs: attrs,
13528	}
13529	op := scope.AddOperation(opspec)
13530	return op.Output(0), op.Output(1), op.Output(2)
13531}
13532
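// Example (editorial sketch, not machine generated): a serialize/deserialize
// round trip for a rank-2 string minibatch; the tensor contents are
// illustrative assumptions.
//
//	s := op.NewScope()
//	indices := op.Const(s, [][]int64{{0, 0}, {1, 1}})
//	values := op.Const(s, []string{"a", "b"})
//	shape := op.Const(s, []int64{2, 4})
//	serialized := op.SerializeManySparse(s, indices, values, shape) // [2, 3]
//	si, sv, ssh := op.DeserializeManySparse(s, serialized, tf.String)
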
13533// Deserialize `SparseTensor` objects.
13534//
13535// The input `serialized_sparse` must have the shape `[?, ?, ..., ?, 3]` where
13536// the last dimension stores serialized `SparseTensor` objects and the other N
13537// dimensions (N >= 0) correspond to a batch. The ranks of the original
13538// `SparseTensor` objects must all match. When the final `SparseTensor` is
13539// created, its rank is the rank of the incoming `SparseTensor` objects plus N;
13540// the sparse tensors have been concatenated along new dimensions, one for each
13541// batch.
13542//
13543// The output `SparseTensor` object's shape values for the original dimensions
13544// are the max across the input `SparseTensor` objects' shape values for the
13545// corresponding dimensions. The new dimensions match the size of the batch.
13546//
13547// The input `SparseTensor` objects' indices are assumed ordered in
13548// standard lexicographic order.  If this is not the case, after this
13549// step run `SparseReorder` to restore index ordering.
13550//
13551// For example, if the serialized input is a `[2 x 3]` matrix representing two
13552// original `SparseTensor` objects:
13553//
13554//     index = [ 0]
13555//             [10]
13556//             [20]
13557//     values = [1, 2, 3]
13558//     shape = [50]
13559//
13560// and
13561//
13562//     index = [ 2]
13563//             [10]
13564//     values = [4, 5]
13565//     shape = [30]
13566//
13567// then the final deserialized `SparseTensor` will be:
13568//
13569//     index = [0  0]
13570//             [0 10]
13571//             [0 20]
13572//             [1  2]
13573//             [1 10]
13574//     values = [1, 2, 3, 4, 5]
13575//     shape = [2 50]
13576//
13577// Arguments:
13578//	serialized_sparse: The serialized `SparseTensor` objects. The last dimension
13579// must have 3 columns.
13580//	dtype: The `dtype` of the serialized `SparseTensor` objects.
13581func DeserializeSparse(scope *Scope, serialized_sparse tf.Output, dtype tf.DataType) (sparse_indices tf.Output, sparse_values tf.Output, sparse_shape tf.Output) {
13582	if scope.Err() != nil {
13583		return
13584	}
13585	attrs := map[string]interface{}{"dtype": dtype}
13586	opspec := tf.OpSpec{
13587		Type: "DeserializeSparse",
13588		Input: []tf.Input{
13589			serialized_sparse,
13590		},
13591		Attrs: attrs,
13592	}
13593	op := scope.AddOperation(opspec)
13594	return op.Output(0), op.Output(1), op.Output(2)
13595}
13596
13597// MaxPool3DGradGradAttr is an optional argument to MaxPool3DGradGrad.
13598type MaxPool3DGradGradAttr func(optionalAttr)
13599
13600// MaxPool3DGradGradDataFormat sets the optional data_format attribute to value.
13601//
13602// value: The data format of the input and output data. With the
13603// default format "NDHWC", the data is stored in the order of:
13604//     [batch, in_depth, in_height, in_width, in_channels].
13605// Alternatively, the format could be "NCDHW", the data storage order is:
13606//     [batch, in_channels, in_depth, in_height, in_width].
13607// If not specified, defaults to "NDHWC"
13608func MaxPool3DGradGradDataFormat(value string) MaxPool3DGradGradAttr {
13609	return func(m optionalAttr) {
13610		m["data_format"] = value
13611	}
13612}
13613
13614// Computes second-order gradients of the maxpooling function.
13615//
13616// Arguments:
13617//	orig_input: The original input tensor.
13618//	orig_output: The original output tensor.
13619//	grad: Output backprop of shape `[batch, depth, rows, cols, channels]`.
13620//	ksize: 1-D tensor of length 5. The size of the window for each dimension of
13621// the input tensor. Must have `ksize[0] = ksize[4] = 1`.
13622//	strides: 1-D tensor of length 5. The stride of the sliding window for each
13623// dimension of `input`. Must have `strides[0] = strides[4] = 1`.
13624//	padding: The type of padding algorithm to use.
13625//
13626// Returns Gradients of gradients w.r.t. the input to `max_pool`.
13627func MaxPool3DGradGrad(scope *Scope, orig_input tf.Output, orig_output tf.Output, grad tf.Output, ksize []int64, strides []int64, padding string, optional ...MaxPool3DGradGradAttr) (output tf.Output) {
13628	if scope.Err() != nil {
13629		return
13630	}
13631	attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
13632	for _, a := range optional {
13633		a(attrs)
13634	}
13635	opspec := tf.OpSpec{
13636		Type: "MaxPool3DGradGrad",
13637		Input: []tf.Input{
13638			orig_input, orig_output, grad,
13639		},
13640		Attrs: attrs,
13641	}
13642	op := scope.AddOperation(opspec)
13643	return op.Output(0)
13644}
13645
13646// Conv3DBackpropFilterV2Attr is an optional argument to Conv3DBackpropFilterV2.
13647type Conv3DBackpropFilterV2Attr func(optionalAttr)
13648
13649// Conv3DBackpropFilterV2DataFormat sets the optional data_format attribute to value.
13650//
13651// value: The data format of the input and output data. With the
13652// default format "NDHWC", the data is stored in the order of:
13653//     [batch, in_depth, in_height, in_width, in_channels].
13654// Alternatively, the format could be "NCDHW", the data storage order is:
13655//     [batch, in_channels, in_depth, in_height, in_width].
13656// If not specified, defaults to "NDHWC"
13657func Conv3DBackpropFilterV2DataFormat(value string) Conv3DBackpropFilterV2Attr {
13658	return func(m optionalAttr) {
13659		m["data_format"] = value
13660	}
13661}
13662
13663// Conv3DBackpropFilterV2Dilations sets the optional dilations attribute to value.
13664//
13665// value: 1-D tensor of length 5.  The dilation factor for each dimension of
13666// `input`. If set to k > 1, there will be k-1 skipped cells between each
13667// filter element on that dimension. The dimension order is determined by the
13668// value of `data_format`, see above for details. Dilations in the batch and
13669// depth dimensions must be 1.
13670// If not specified, defaults to [1, 1, 1, 1, 1]
13671func Conv3DBackpropFilterV2Dilations(value []int64) Conv3DBackpropFilterV2Attr {
13672	return func(m optionalAttr) {
13673		m["dilations"] = value
13674	}
13675}
13676
13677// Computes the gradients of 3-D convolution with respect to the filter.
13678//
13679// Arguments:
13680//	input: Shape `[batch, depth, rows, cols, in_channels]`.
13681//	filter_sizes: An integer vector representing the tensor shape of `filter`,
13682// where `filter` is a 5-D
13683// `[filter_depth, filter_height, filter_width, in_channels, out_channels]`
13684// tensor.
13685//	out_backprop: Backprop signal of shape `[batch, out_depth, out_rows, out_cols,
13686// out_channels]`.
13687//	strides: 1-D tensor of length 5. The stride of the sliding window for each
13688// dimension of `input`. Must have `strides[0] = strides[4] = 1`.
13689//	padding: The type of padding algorithm to use.
13690func Conv3DBackpropFilterV2(scope *Scope, input tf.Output, filter_sizes tf.Output, out_backprop tf.Output, strides []int64, padding string, optional ...Conv3DBackpropFilterV2Attr) (output tf.Output) {
13691	if scope.Err() != nil {
13692		return
13693	}
13694	attrs := map[string]interface{}{"strides": strides, "padding": padding}
13695	for _, a := range optional {
13696		a(attrs)
13697	}
13698	opspec := tf.OpSpec{
13699		Type: "Conv3DBackpropFilterV2",
13700		Input: []tf.Input{
13701			input, filter_sizes, out_backprop,
13702		},
13703		Attrs: attrs,
13704	}
13705	op := scope.AddOperation(opspec)
13706	return op.Output(0)
13707}
13708
13709// Execute a subgraph on a remote processor.
13710//
13711// The graph specifications (such as the graph itself, input tensors, and output
13712// names) are stored as a serialized RemoteFusedGraphExecuteInfo protocol buffer
13713// in serialized_remote_fused_graph_execute_info.
13714// The specifications will be passed to a dedicated registered
13715// remote fused graph executor.  The executor will send the graph specifications
13716// to a remote processor and execute that graph.  The execution results
13717// will be passed to consumer nodes as outputs of this node.
13718//
13719// Arguments:
13720//	inputs: Arbitrary number of tensors with arbitrary data types
13721//
13722//	serialized_remote_fused_graph_execute_info: Serialized protocol buffer
13723// of RemoteFusedGraphExecuteInfo which contains graph specifications.
13724//
13725// Returns Arbitrary number of tensors with arbitrary data types
13726func RemoteFusedGraphExecute(scope *Scope, inputs []tf.Output, Toutputs []tf.DataType, serialized_remote_fused_graph_execute_info string) (outputs []tf.Output) {
13727	if scope.Err() != nil {
13728		return
13729	}
13730	attrs := map[string]interface{}{"Toutputs": Toutputs, "serialized_remote_fused_graph_execute_info": serialized_remote_fused_graph_execute_info}
13731	opspec := tf.OpSpec{
13732		Type: "RemoteFusedGraphExecute",
13733		Input: []tf.Input{
13734			tf.OutputList(inputs),
13735		},
13736		Attrs: attrs,
13737	}
13738	op := scope.AddOperation(opspec)
13739	if scope.Err() != nil {
13740		return
13741	}
13742	var idx int
13743	var err error
13744	if outputs, idx, err = makeOutputList(op, idx, "outputs"); err != nil {
13745		scope.UpdateErr("RemoteFusedGraphExecute", err)
13746		return
13747	}
13748	return outputs
13749}
13750
13751// SerializeManySparseAttr is an optional argument to SerializeManySparse.
13752type SerializeManySparseAttr func(optionalAttr)
13753
13754// SerializeManySparseOutType sets the optional out_type attribute to value.
13755//
13756// value: The `dtype` to use for serialization; the supported types are `string`
13757// (default) and `variant`.
13758// If not specified, defaults to DT_STRING
13759func SerializeManySparseOutType(value tf.DataType) SerializeManySparseAttr {
13760	return func(m optionalAttr) {
13761		m["out_type"] = value
13762	}
13763}
13764
13765// Serialize an `N`-minibatch `SparseTensor` into an `[N, 3]` `Tensor` object.
13766//
13767// The `SparseTensor` must have rank `R` greater than 1, and the first dimension
13768// is treated as the minibatch dimension.  Elements of the `SparseTensor`
13769// must be sorted in increasing order of this first dimension.  The serialized
13770// `SparseTensor` objects going into each row of `serialized_sparse` will have
13771// rank `R-1`.
13772//
13773// The minibatch size `N` is extracted from `sparse_shape[0]`.
13774//
13775// Arguments:
13776//	sparse_indices: 2-D.  The `indices` of the minibatch `SparseTensor`.
13777//	sparse_values: 1-D.  The `values` of the minibatch `SparseTensor`.
13778//	sparse_shape: 1-D.  The `shape` of the minibatch `SparseTensor`.
13779func SerializeManySparse(scope *Scope, sparse_indices tf.Output, sparse_values tf.Output, sparse_shape tf.Output, optional ...SerializeManySparseAttr) (serialized_sparse tf.Output) {
13780	if scope.Err() != nil {
13781		return
13782	}
13783	attrs := map[string]interface{}{}
13784	for _, a := range optional {
13785		a(attrs)
13786	}
13787	opspec := tf.OpSpec{
13788		Type: "SerializeManySparse",
13789		Input: []tf.Input{
13790			sparse_indices, sparse_values, sparse_shape,
13791		},
13792		Attrs: attrs,
13793	}
13794	op := scope.AddOperation(opspec)
13795	return op.Output(0)
13796}
13797
13798// Computes inverse hyperbolic cosine of x element-wise.
13799func Acosh(scope *Scope, x tf.Output) (y tf.Output) {
13800	if scope.Err() != nil {
13801		return
13802	}
13803	opspec := tf.OpSpec{
13804		Type: "Acosh",
13805		Input: []tf.Input{
13806			x,
13807		},
13808	}
13809	op := scope.AddOperation(opspec)
13810	return op.Output(0)
13811}
13812
13813// Computes rectified linear 6 gradients for a Relu6 operation.
13814//
13815// Arguments:
13816//	gradients: The backpropagated gradients to the corresponding Relu6 operation.
13817//	features: The features passed as input to the corresponding Relu6 operation, or
13818// its output; using either one produces the same result.
13819//
13820// Returns The gradients:
13821// `gradients * (features > 0) * (features < 6)`.
13822func Relu6Grad(scope *Scope, gradients tf.Output, features tf.Output) (backprops tf.Output) {
13823	if scope.Err() != nil {
13824		return
13825	}
13826	opspec := tf.OpSpec{
13827		Type: "Relu6Grad",
13828		Input: []tf.Input{
13829			gradients, features,
13830		},
13831	}
13832	op := scope.AddOperation(opspec)
13833	return op.Output(0)
13834}
13835
13836// Computes natural logarithm of (1 + x) element-wise.
13837//
13838// I.e., \\(y = \log_e (1 + x)\\).
13839func Log1p(scope *Scope, x tf.Output) (y tf.Output) {
13840	if scope.Err() != nil {
13841		return
13842	}
13843	opspec := tf.OpSpec{
13844		Type: "Log1p",
13845		Input: []tf.Input{
13846			x,
13847		},
13848	}
13849	op := scope.AddOperation(opspec)
13850	return op.Output(0)
13851}
13852
13853// ResizeBicubicAttr is an optional argument to ResizeBicubic.
13854type ResizeBicubicAttr func(optionalAttr)
13855
13856// ResizeBicubicAlignCorners sets the optional align_corners attribute to value.
13857//
13858// value: If true, the centers of the 4 corner pixels of the input and output tensors are
13859// aligned, preserving the values at the corner pixels. Defaults to false.
13860// If not specified, defaults to false
13861func ResizeBicubicAlignCorners(value bool) ResizeBicubicAttr {
13862	return func(m optionalAttr) {
13863		m["align_corners"] = value
13864	}
13865}
13866
13867// ResizeBicubicHalfPixelCenters sets the optional half_pixel_centers attribute to value.
13868// If not specified, defaults to false
13869func ResizeBicubicHalfPixelCenters(value bool) ResizeBicubicAttr {
13870	return func(m optionalAttr) {
13871		m["half_pixel_centers"] = value
13872	}
13873}
13874
13875// Resize `images` to `size` using bicubic interpolation.
13876//
13877// Input images can be of different types but output images are always float.
13878//
13879// Arguments:
13880//	images: 4-D with shape `[batch, height, width, channels]`.
13881//	size: A 1-D int32 Tensor of 2 elements: `new_height, new_width`.  The
13882// new size for the images.
13883//
13884// Returns 4-D with shape
13885// `[batch, new_height, new_width, channels]`.
13886func ResizeBicubic(scope *Scope, images tf.Output, size tf.Output, optional ...ResizeBicubicAttr) (resized_images tf.Output) {
13887	if scope.Err() != nil {
13888		return
13889	}
13890	attrs := map[string]interface{}{}
13891	for _, a := range optional {
13892		a(attrs)
13893	}
13894	opspec := tf.OpSpec{
13895		Type: "ResizeBicubic",
13896		Input: []tf.Input{
13897			images, size,
13898		},
13899		Attrs: attrs,
13900	}
13901	op := scope.AddOperation(opspec)
13902	return op.Output(0)
13903}
13904
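// Example (editorial sketch, not machine generated): resizing a batch of
// images to a hypothetical 224x224 target.
//
//	s := op.NewScope()
//	images := op.Placeholder(s, tf.Float) // [batch, height, width, channels]
//	resized := op.ResizeBicubic(s, images, op.Const(s, []int32{224, 224}),
//		op.ResizeBicubicAlignCorners(false))
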
13905// SparseTensorDenseMatMulAttr is an optional argument to SparseTensorDenseMatMul.
13906type SparseTensorDenseMatMulAttr func(optionalAttr)
13907
13908// SparseTensorDenseMatMulAdjointA sets the optional adjoint_a attribute to value.
13909//
13910// value: Use the adjoint of A in the matrix multiply.  If A is complex, this
13911// is transpose(conj(A)).  Otherwise it's transpose(A).
13912// If not specified, defaults to false
13913func SparseTensorDenseMatMulAdjointA(value bool) SparseTensorDenseMatMulAttr {
13914	return func(m optionalAttr) {
13915		m["adjoint_a"] = value
13916	}
13917}
13918
13919// SparseTensorDenseMatMulAdjointB sets the optional adjoint_b attribute to value.
13920//
13921// value: Use the adjoint of B in the matrix multiply.  If B is complex, this
13922// is transpose(conj(B)).  Otherwise it's transpose(B).
13923// If not specified, defaults to false
13924func SparseTensorDenseMatMulAdjointB(value bool) SparseTensorDenseMatMulAttr {
13925	return func(m optionalAttr) {
13926		m["adjoint_b"] = value
13927	}
13928}
13929
13930// Multiply SparseTensor (of rank 2) "A" by dense matrix "B".
13931//
13932// No validity checking is performed on the indices of A.  However, the following
13933// input format is recommended for optimal behavior:
13934//
13935// if adjoint_a == false:
13936//   A should be sorted in lexicographically increasing order.  Use SparseReorder
13937//   if you're not sure.
13938// if adjoint_a == true:
13939//   A should be sorted in order of increasing dimension 1 (i.e., "column major"
13940//   order instead of "row major" order).
13941//
13942// Arguments:
13943//	a_indices: 2-D.  The `indices` of the `SparseTensor`, size `[nnz, 2]` Matrix.
13944//	a_values: 1-D.  The `values` of the `SparseTensor`, size `[nnz]` Vector.
13945//	a_shape: 1-D.  The `shape` of the `SparseTensor`, size `[2]` Vector.
13946//	b: 2-D.  A dense Matrix.
13947func SparseTensorDenseMatMul(scope *Scope, a_indices tf.Output, a_values tf.Output, a_shape tf.Output, b tf.Output, optional ...SparseTensorDenseMatMulAttr) (product tf.Output) {
13948	if scope.Err() != nil {
13949		return
13950	}
13951	attrs := map[string]interface{}{}
13952	for _, a := range optional {
13953		a(attrs)
13954	}
13955	opspec := tf.OpSpec{
13956		Type: "SparseTensorDenseMatMul",
13957		Input: []tf.Input{
13958			a_indices, a_values, a_shape, b,
13959		},
13960		Attrs: attrs,
13961	}
13962	op := scope.AddOperation(opspec)
13963	return op.Output(0)
13964}
13965
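// Example (editorial sketch, not machine generated): multiplying a 2x3
// SparseTensor holding two non-zero entries by a dense 3x2 matrix; the
// literal values are assumptions for illustration.
//
//	s := op.NewScope()
//	aIdx := op.Const(s, [][]int64{{0, 0}, {1, 2}}) // lexicographic order
//	aVal := op.Const(s, []float32{1, 2})
//	aShape := op.Const(s, []int64{2, 3})
//	b := op.Const(s, [][]float32{{1, 2}, {3, 4}, {5, 6}})
//	product := op.SparseTensorDenseMatMul(s, aIdx, aVal, aShape, b) // [2, 2]
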
13966// Adds two `SparseTensor` objects to produce another `SparseTensor`.
13967//
13968// The input `SparseTensor` objects' indices are assumed ordered in standard
13969// lexicographic order.  If this is not the case, before this step run
13970// `SparseReorder` to restore index ordering.
13971//
13972// By default, if two values sum to zero at some index, the output `SparseTensor`
13973// would still include that particular location in its index, storing a zero in the
13974// corresponding value slot.  To override this, callers can specify `thresh`,
13975// indicating that if the sum has a magnitude strictly smaller than `thresh`, its
13976// corresponding value and index would then not be included.  In particular,
13977// `thresh == 0` (default) means everything is kept and actual thresholding happens
13978// only for a positive value.
13979//
13980// In the following shapes, `nnz` is the count after taking `thresh` into account.
13981//
13982// Arguments:
13983//	a_indices: 2-D.  The `indices` of the first `SparseTensor`, size `[nnz, ndims]` Matrix.
13984//	a_values: 1-D.  The `values` of the first `SparseTensor`, size `[nnz]` Vector.
13985//	a_shape: 1-D.  The `shape` of the first `SparseTensor`, size `[ndims]` Vector.
13986//	b_indices: 2-D.  The `indices` of the second `SparseTensor`, size `[nnz, ndims]` Matrix.
13987//	b_values: 1-D.  The `values` of the second `SparseTensor`, size `[nnz]` Vector.
13988//	b_shape: 1-D.  The `shape` of the second `SparseTensor`, size `[ndims]` Vector.
13989//	thresh: 0-D.  The magnitude threshold that determines if an output value/index
13990// pair takes space.
13991func SparseAdd(scope *Scope, a_indices tf.Output, a_values tf.Output, a_shape tf.Output, b_indices tf.Output, b_values tf.Output, b_shape tf.Output, thresh tf.Output) (sum_indices tf.Output, sum_values tf.Output, sum_shape tf.Output) {
13992	if scope.Err() != nil {
13993		return
13994	}
13995	opspec := tf.OpSpec{
13996		Type: "SparseAdd",
13997		Input: []tf.Input{
13998			a_indices, a_values, a_shape, b_indices, b_values, b_shape, thresh,
13999		},
14000	}
14001	op := scope.AddOperation(opspec)
14002	return op.Output(0), op.Output(1), op.Output(2)
14003}
14004
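// Example (editorial sketch, not machine generated): adding two sparse
// vectors whose single entries cancel; because |1 + (-1)| is smaller than
// thresh, the result is empty. All values are assumptions for illustration.
//
//	s := op.NewScope()
//	aIdx := op.Const(s, [][]int64{{0}})
//	aVal := op.Const(s, []float32{1})
//	aShape := op.Const(s, []int64{3})
//	bIdx := op.Const(s, [][]int64{{0}})
//	bVal := op.Const(s, []float32{-1})
//	bShape := op.Const(s, []int64{3})
//	thresh := op.Const(s, float32(1e-6)) // same real dtype as the values
//	si, sv, ssh := op.SparseAdd(s, aIdx, aVal, aShape, bIdx, bVal, bShape, thresh)
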
14005// EnqueueTPUEmbeddingSparseTensorBatchAttr is an optional argument to EnqueueTPUEmbeddingSparseTensorBatch.
14006type EnqueueTPUEmbeddingSparseTensorBatchAttr func(optionalAttr)
14007
14008// EnqueueTPUEmbeddingSparseTensorBatchDeviceOrdinal sets the optional device_ordinal attribute to value.
14009//
14010// value: The TPU device to use. Should be >= 0 and less than the number
14011// of TPU cores in the task on which the node is placed.
14012// If not specified, defaults to -1
14013func EnqueueTPUEmbeddingSparseTensorBatchDeviceOrdinal(value int64) EnqueueTPUEmbeddingSparseTensorBatchAttr {
14014	return func(m optionalAttr) {
14015		m["device_ordinal"] = value
14016	}
14017}
14018
14019// EnqueueTPUEmbeddingSparseTensorBatchCombiners sets the optional combiners attribute to value.
14020//
14021// value: A list of string scalars, one for each embedding table, specifying
14022// how to normalize the embedding activations after weighted summation.
14023// Supported combiners are 'mean', 'sum', or 'sqrtn'. It is invalid to have
14024// the sum of the weights be 0 for 'mean' or the sum of the squared weights be
14025// 0 for 'sqrtn'. If combiners isn't passed, the default is to use 'sum' for
14026// all tables.
14027// If not specified, defaults to the empty list
14028func EnqueueTPUEmbeddingSparseTensorBatchCombiners(value []string) EnqueueTPUEmbeddingSparseTensorBatchAttr {
14029	return func(m optionalAttr) {
14030		m["combiners"] = value
14031	}
14032}
14033
14034// Eases the porting of code that uses tf.nn.embedding_lookup_sparse().
14035//
14036// sample_indices[i], embedding_indices[i] and aggregation_weights[i] correspond
14037// to the ith feature. table_ids[i] indicates which embedding table to use for
14038// the ith feature.
14039//
14040// The tensors at corresponding positions in the three input lists (sample_indices,
14041// embedding_indices and aggregation_weights) must have the same shape, i.e. rank 1
14042// with dim_size() equal to the total number of lookups into the table described by
14043// the corresponding feature.
14044//
14045// Arguments:
14046//	sample_indices: A list of rank 1 Tensors specifying the training example to
14047// which the corresponding embedding_indices and aggregation_weights values
14048// belong. It corresponds to sp_ids.indices[:,0] in  embedding_lookup_sparse().
14049//	embedding_indices: A list of rank 1 Tensors, indices into the embedding tables.
14050// It corresponds to sp_ids.values in embedding_lookup_sparse().
14051//	aggregation_weights: A list of rank 1 Tensors containing per training example
14052// aggregation weights. It corresponds to sp_weights.values in
14053// embedding_lookup_sparse().
14054//	mode_override: A string input that overrides the mode specified in the
14055// TPUEmbeddingConfiguration. Supported values are {'unspecified', 'inference',
14056// 'training', 'backward_pass_only'}. When set to 'unspecified', the mode set
14057// in TPUEmbeddingConfiguration is used, otherwise mode_override is used.
14058//	table_ids: A list of integers specifying the identifier of the embedding table
14059// (offset of TableDescriptor in the TPUEmbeddingConfiguration) to lookup the
14060// corresponding input. The ith input is looked up using table_ids[i]. The size
14061// of the table_ids list must be equal to that of sample_indices,
14062// embedding_indices and aggregation_weights.
14063//
14064// Returns the created operation.
14065func EnqueueTPUEmbeddingSparseTensorBatch(scope *Scope, sample_indices []tf.Output, embedding_indices []tf.Output, aggregation_weights []tf.Output, mode_override tf.Output, table_ids []int64, optional ...EnqueueTPUEmbeddingSparseTensorBatchAttr) (o *tf.Operation) {
14066	if scope.Err() != nil {
14067		return
14068	}
14069	attrs := map[string]interface{}{"table_ids": table_ids}
14070	for _, a := range optional {
14071		a(attrs)
14072	}
14073	opspec := tf.OpSpec{
14074		Type: "EnqueueTPUEmbeddingSparseTensorBatch",
14075		Input: []tf.Input{
14076			tf.OutputList(sample_indices), tf.OutputList(embedding_indices), tf.OutputList(aggregation_weights), mode_override,
14077		},
14078		Attrs: attrs,
14079	}
14080	return scope.AddOperation(opspec)
14081}
14082
14083// The gradient operator for the SparseAdd op.
14084//
14085// The SparseAdd op calculates A + B, where A, B, and the sum are all represented
14086// as `SparseTensor` objects.  This op takes in the upstream gradient w.r.t.
14087// non-empty values of the sum, and outputs the gradients w.r.t. the non-empty
14088// values of A and B.
14089//
14090// Arguments:
14091//	backprop_val_grad: 1-D with shape `[nnz(sum)]`.  The gradient with respect to
14092// the non-empty values of the sum.
14093//	a_indices: 2-D.  The `indices` of the `SparseTensor` A, size `[nnz(A), ndims]`.
14094//	b_indices: 2-D.  The `indices` of the `SparseTensor` B, size `[nnz(B), ndims]`.
14095//	sum_indices: 2-D.  The `indices` of the sum `SparseTensor`, size
14096// `[nnz(sum), ndims]`.
14097//
14098// Returns 1-D with shape `[nnz(A)]`, the gradient with respect to the
14099// non-empty values of A; and 1-D with shape `[nnz(B)]`, the gradient with
14100// respect to the non-empty values of B.
14101func SparseAddGrad(scope *Scope, backprop_val_grad tf.Output, a_indices tf.Output, b_indices tf.Output, sum_indices tf.Output) (a_val_grad tf.Output, b_val_grad tf.Output) {
14102	if scope.Err() != nil {
14103		return
14104	}
14105	opspec := tf.OpSpec{
14106		Type: "SparseAddGrad",
14107		Input: []tf.Input{
14108			backprop_val_grad, a_indices, b_indices, sum_indices,
14109		},
14110	}
14111	op := scope.AddOperation(opspec)
14112	return op.Output(0), op.Output(1)
14113}
14114
14115// DenseToSparseSetOperationAttr is an optional argument to DenseToSparseSetOperation.
14116type DenseToSparseSetOperationAttr func(optionalAttr)
14117
14118// DenseToSparseSetOperationValidateIndices sets the optional validate_indices attribute to value.
14119// If not specified, defaults to true
14120func DenseToSparseSetOperationValidateIndices(value bool) DenseToSparseSetOperationAttr {
14121	return func(m optionalAttr) {
14122		m["validate_indices"] = value
14123	}
14124}
14125
14126// Applies set operation along last dimension of `Tensor` and `SparseTensor`.
14127//
14128// See SetOperationOp::SetOperationFromContext for values of `set_operation`.
14129//
14130// Input `set2` is a `SparseTensor` represented by `set2_indices`, `set2_values`,
14131// and `set2_shape`. For `set2` ranked `n`, 1st `n-1` dimensions must be the same
14132// as `set1`. Dimension `n` contains values in a set, duplicates are allowed but
14133// ignored.
14134//
14135// If `validate_indices` is `True`, this op validates the order and range of `set2`
14136// indices.
14137//
14138// Output `result` is a `SparseTensor` represented by `result_indices`,
14139// `result_values`, and `result_shape`. For `set1` and `set2` ranked `n`, this
14140// has rank `n` and the same 1st `n-1` dimensions as `set1` and `set2`. The `nth`
14141// dimension contains the result of `set_operation` applied to the corresponding
14142// `[0...n-1]` dimension of `set1` and `set2`.
14143//
14144// Arguments:
14145//	set1: `Tensor` with rank `n`. 1st `n-1` dimensions must be the same as `set2`.
14146// Dimension `n` contains values in a set, duplicates are allowed but ignored.
14147//	set2_indices: 2D `Tensor`, indices of a `SparseTensor`. Must be in row-major
14148// order.
14149//	set2_values: 1D `Tensor`, values of a `SparseTensor`. Must be in row-major
14150// order.
14151//	set2_shape: 1D `Tensor`, shape of a `SparseTensor`. `set2_shape[0...n-1]` must
14152// be the same as the 1st `n-1` dimensions of `set1`, `result_shape[n]` is the
14153// max set size across `n-1` dimensions.
14154//
14155//
14156// Returns 2D indices of a `SparseTensor`; 1D values of a `SparseTensor`; and the 1D `Tensor` shape of a `SparseTensor`, where `result_shape[0...n-1]` is
14157// the same as the 1st `n-1` dimensions of `set1` and `set2`, and `result_shape[n]`
14158// is the max result set size across all `0...n-1` dimensions.
14159func DenseToSparseSetOperation(scope *Scope, set1 tf.Output, set2_indices tf.Output, set2_values tf.Output, set2_shape tf.Output, set_operation string, optional ...DenseToSparseSetOperationAttr) (result_indices tf.Output, result_values tf.Output, result_shape tf.Output) {
14160	if scope.Err() != nil {
14161		return
14162	}
14163	attrs := map[string]interface{}{"set_operation": set_operation}
14164	for _, a := range optional {
14165		a(attrs)
14166	}
14167	opspec := tf.OpSpec{
14168		Type: "DenseToSparseSetOperation",
14169		Input: []tf.Input{
14170			set1, set2_indices, set2_values, set2_shape,
14171		},
14172		Attrs: attrs,
14173	}
14174	op := scope.AddOperation(opspec)
14175	return op.Output(0), op.Output(1), op.Output(2)
14176}
14177
14178// L2 Loss.
14179//
14180// Computes half the L2 norm of a tensor without the `sqrt`:
14181//
14182//     output = sum(t ** 2) / 2
14183//
14184// Arguments:
14185//	t: Typically 2-D, but may have any dimensions.
14186//
14187// Returns 0-D.
14188func L2Loss(scope *Scope, t tf.Output) (output tf.Output) {
14189	if scope.Err() != nil {
14190		return
14191	}
14192	opspec := tf.OpSpec{
14193		Type: "L2Loss",
14194		Input: []tf.Input{
14195			t,
14196		},
14197	}
14198	op := scope.AddOperation(opspec)
14199	return op.Output(0)
14200}
14201
14202// Conv3DAttr is an optional argument to Conv3D.
14203type Conv3DAttr func(optionalAttr)
14204
14205// Conv3DDataFormat sets the optional data_format attribute to value.
14206//
14207// value: The data format of the input and output data. With the
14208// default format "NDHWC", the data is stored in the order of:
14209//     [batch, in_depth, in_height, in_width, in_channels].
14210// Alternatively, the format could be "NCDHW", the data storage order is:
14211//     [batch, in_channels, in_depth, in_height, in_width].
14212// If not specified, defaults to "NDHWC"
14213func Conv3DDataFormat(value string) Conv3DAttr {
14214	return func(m optionalAttr) {
14215		m["data_format"] = value
14216	}
14217}
14218
14219// Conv3DDilations sets the optional dilations attribute to value.
14220//
14221// value: 1-D tensor of length 5.  The dilation factor for each dimension of
14222// `input`. If set to k > 1, there will be k-1 skipped cells between each
14223// filter element on that dimension. The dimension order is determined by the
14224// value of `data_format`, see above for details. Dilations in the batch and
14225// depth dimensions must be 1.
14226// If not specified, defaults to [1, 1, 1, 1, 1]
14227func Conv3DDilations(value []int64) Conv3DAttr {
14228	return func(m optionalAttr) {
14229		m["dilations"] = value
14230	}
14231}
14232
14233// Computes a 3-D convolution given 5-D `input` and `filter` tensors.
14234//
14235// In signal processing, cross-correlation is a measure of similarity of
14236// two waveforms as a function of a time-lag applied to one of them. This
14237// is also known as a sliding dot product or sliding inner-product.
14238//
14239// Our Conv3D implements a form of cross-correlation.
14240//
14241// Arguments:
14242//	input: Shape `[batch, in_depth, in_height, in_width, in_channels]`.
14243//	filter: Shape `[filter_depth, filter_height, filter_width, in_channels,
14244// out_channels]`. `in_channels` must match between `input` and `filter`.
14245//	strides: 1-D tensor of length 5. The stride of the sliding window for each
14246// dimension of `input`. Must have `strides[0] = strides[4] = 1`.
14247//	padding: The type of padding algorithm to use.
14248func Conv3D(scope *Scope, input tf.Output, filter tf.Output, strides []int64, padding string, optional ...Conv3DAttr) (output tf.Output) {
14249	if scope.Err() != nil {
14250		return
14251	}
14252	attrs := map[string]interface{}{"strides": strides, "padding": padding}
14253	for _, a := range optional {
14254		a(attrs)
14255	}
14256	opspec := tf.OpSpec{
14257		Type: "Conv3D",
14258		Input: []tf.Input{
14259			input, filter,
14260		},
14261		Attrs: attrs,
14262	}
14263	op := scope.AddOperation(opspec)
14264	return op.Output(0)
14265}
14266
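// Example (editorial sketch, not machine generated): a stride-1 "SAME"
// 3-D convolution in the default NDHWC layout.
//
//	s := op.NewScope()
//	input := op.Placeholder(s, tf.Float)  // [batch, depth, height, width, in_ch]
//	filter := op.Placeholder(s, tf.Float) // [fd, fh, fw, in_ch, out_ch]
//	out := op.Conv3D(s, input, filter, []int64{1, 1, 1, 1, 1}, "SAME")
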
14267// Adds up a SparseTensor and a dense Tensor, using these special rules:
14268//
14269// (1) Broadcasts the dense side to have the same shape as the sparse side, if
14270//     eligible;
14271// (2) Then, only the dense values pointed to by the indices of the SparseTensor
14272//     participate in the cwise addition.
14273//
14274// By these rules, the result is a logical SparseTensor with exactly the same
14275// indices and shape, but possibly with different non-zero values.  The output of
14276// this Op is the resultant non-zero values.
14277//
14278// Arguments:
14279//	sp_indices: 2-D.  `N x R` matrix with the indices of non-empty values in a
14280// SparseTensor, possibly not in canonical ordering.
14281//	sp_values: 1-D.  `N` non-empty values corresponding to `sp_indices`.
14282//	sp_shape: 1-D.  Shape of the input SparseTensor.
14283//	dense: `R`-D.  The dense Tensor operand.
14284//
14285// Returns 1-D.  The `N` values that are operated on.
14286func SparseDenseCwiseAdd(scope *Scope, sp_indices tf.Output, sp_values tf.Output, sp_shape tf.Output, dense tf.Output) (output tf.Output) {
14287	if scope.Err() != nil {
14288		return
14289	}
14290	opspec := tf.OpSpec{
14291		Type: "SparseDenseCwiseAdd",
14292		Input: []tf.Input{
14293			sp_indices, sp_values, sp_shape, dense,
14294		},
14295	}
14296	op := scope.AddOperation(opspec)
14297	return op.Output(0)
14298}
14299
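// Example (editorial sketch, not machine generated): the dense operand below
// broadcasts from shape [2] to [2, 2], and only the two indexed positions
// participate in the addition; all values are assumptions for illustration.
//
//	s := op.NewScope()
//	spIdx := op.Const(s, [][]int64{{0, 0}, {1, 1}})
//	spVal := op.Const(s, []float32{10, 20})
//	spShape := op.Const(s, []int64{2, 2})
//	dense := op.Const(s, []float32{1, 2})
//	out := op.SparseDenseCwiseAdd(s, spIdx, spVal, spShape, dense) // [11, 22]
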
14300// UnicodeDecodeAttr is an optional argument to UnicodeDecode.
14301type UnicodeDecodeAttr func(optionalAttr)
14302
14303// UnicodeDecodeErrors sets the optional errors attribute to value.
14304//
14305// value: Error handling policy when there is invalid formatting found in the input.
14306// The value of 'strict' will cause the operation to produce a InvalidArgument
14307// error on any invalid input formatting. A value of 'replace' (the default) will
14308// cause the operation to replace any invalid formatting in the input with the
14309// `replacement_char` codepoint. A value of 'ignore' will cause the operation to
14310// skip any invalid formatting in the input and produce no corresponding output
14311// character.
14312// If not specified, defaults to "replace"
14313func UnicodeDecodeErrors(value string) UnicodeDecodeAttr {
14314	return func(m optionalAttr) {
14315		m["errors"] = value
14316	}
14317}
14318
14319// UnicodeDecodeReplacementChar sets the optional replacement_char attribute to value.
14320//
14321// value: The replacement character codepoint to be used in place of any invalid
14322// formatting in the input when `errors='replace'`. Any valid unicode codepoint may
14323// be used. The default is the unicode replacement character, U+FFFD
14324// (decimal 65533).
14325// If not specified, defaults to 65533
14326func UnicodeDecodeReplacementChar(value int64) UnicodeDecodeAttr {
14327	return func(m optionalAttr) {
14328		m["replacement_char"] = value
14329	}
14330}
14331
14332// UnicodeDecodeReplaceControlCharacters sets the optional replace_control_characters attribute to value.
14333//
14334// value: Whether to replace the C0 control characters (00-1F) with the
14335// `replacement_char`. Default is false.
14336// If not specified, defaults to false
14337func UnicodeDecodeReplaceControlCharacters(value bool) UnicodeDecodeAttr {
14338	return func(m optionalAttr) {
14339		m["replace_control_characters"] = value
14340	}
14341}
14342
14343// Decodes each string in `input` into a sequence of Unicode code points.
14344//
14345// The character codepoints for all strings are returned using a single vector
14346// `char_values`, with strings expanded to characters in row-major order.
14347//
14348// The `row_splits` tensor indicates where the codepoints for
14349// each input string begin and end within the `char_values` tensor.
14350// In particular, the values for the `i`th
14351// string (in row-major order) are stored in the slice
14352// `[row_splits[i]:row_splits[i+1]]`. Thus:
14353//
14354// * `char_values[row_splits[i]+j]` is the Unicode codepoint for the `j`th
14355//   character in the `i`th string (in row-major order).
14356// * `row_splits[i+1] - row_splits[i]` is the number of characters in the `i`th
14357//   string (in row-major order).
14358//
14359// Arguments:
14360//	input: The text to be decoded. Can have any shape. Note that the output is flattened
14361// to a vector of char values.
14362//	input_encoding: Text encoding of the input strings. This is any of the encodings supported
14363// by ICU ucnv algorithmic converters. Examples: `"UTF-16", "US ASCII", "UTF-8"`.
14364//
14365// Returns a 1D int32 tensor containing the row splits, and a 1D int32 tensor containing the decoded codepoints.
14366func UnicodeDecode(scope *Scope, input tf.Output, input_encoding string, optional ...UnicodeDecodeAttr) (row_splits tf.Output, char_values tf.Output) {
14367	if scope.Err() != nil {
14368		return
14369	}
14370	attrs := map[string]interface{}{"input_encoding": input_encoding}
14371	for _, a := range optional {
14372		a(attrs)
14373	}
14374	opspec := tf.OpSpec{
14375		Type: "UnicodeDecode",
14376		Input: []tf.Input{
14377			input,
14378		},
14379		Attrs: attrs,
14380	}
14381	op := scope.AddOperation(opspec)
14382	return op.Output(0), op.Output(1)
14383}
14384
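// Example (editorial sketch, not machine generated): decoding UTF-8 strings
// while replacing malformed bytes with U+FFFD.
//
//	s := op.NewScope()
//	text := op.Const(s, []string{"héllo", "wörld"})
//	rowSplits, chars := op.UnicodeDecode(s, text, "UTF-8",
//		op.UnicodeDecodeErrors("replace"))
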
14385// QuantizeV2Attr is an optional argument to QuantizeV2.
14386type QuantizeV2Attr func(optionalAttr)
14387
14388// QuantizeV2Mode sets the optional mode attribute to value.
14389// If not specified, defaults to "MIN_COMBINED"
14390func QuantizeV2Mode(value string) QuantizeV2Attr {
14391	return func(m optionalAttr) {
14392		m["mode"] = value
14393	}
14394}
14395
14396// QuantizeV2RoundMode sets the optional round_mode attribute to value.
14397// If not specified, defaults to "HALF_AWAY_FROM_ZERO"
14398func QuantizeV2RoundMode(value string) QuantizeV2Attr {
14399	return func(m optionalAttr) {
14400		m["round_mode"] = value
14401	}
14402}
14403
14404// Quantize the 'input' tensor of type float to 'output' tensor of type 'T'.
14405//
14406// [min_range, max_range] are scalar floats that specify the range for
14407// the 'input' data. The 'mode' attribute controls exactly which calculations are
14408// used to convert the float values to their quantized equivalents.  The
14409// 'round_mode' attribute controls which rounding tie-breaking algorithm is used
14410// when rounding float values to their quantized equivalents.
14411//
14412// In 'MIN_COMBINED' mode, each value of the tensor will undergo the following:
14413//
14414// ```
14415// out[i] = (in[i] - min_range) * range(T) / (max_range - min_range)
14416// if T == qint8: out[i] -= (range(T) + 1) / 2.0
14417// ```
14418//
14419// here `range(T) = numeric_limits<T>::max() - numeric_limits<T>::min()`
14420//
14421// *MIN_COMBINED Mode Example*
14422//
14423// Assume the input is type float and has a possible range of [0.0, 6.0] and the
14424// output type is quint8 ([0, 255]). The min_range and max_range values should be
14425// specified as 0.0 and 6.0. Quantizing from float to quint8 will multiply each
14426// value of the input by 255/6 and cast to quint8.
14427//
14428// If the output type was qint8 ([-128, 127]), the operation will additionally
14429// subtract each value by 128 prior to casting, so that the range of values aligns
14430// with the range of qint8.
14431//
14432// If the mode is 'MIN_FIRST', then this approach is used:
14433//
14434// ```
14435// num_discrete_values = 1 << (# of bits in T)
14436// range_adjust = num_discrete_values / (num_discrete_values - 1)
14437// range = (range_max - range_min) * range_adjust
14438// range_scale = num_discrete_values / range
14439// quantized = round(input * range_scale) - round(range_min * range_scale) +
14440//   numeric_limits<T>::min()
14441// quantized = max(quantized, numeric_limits<T>::min())
14442// quantized = min(quantized, numeric_limits<T>::max())
14443// ```
14444//
14445// The biggest difference between this and MIN_COMBINED is that the minimum range
14446// is rounded first, before it's subtracted from the rounded value. With
14447// MIN_COMBINED, a small bias is introduced where repeated iterations of quantizing
14448// and dequantizing will introduce a larger and larger error.
14449//
14450// *SCALED mode Example*
14451//
14452// `SCALED` mode matches the quantization approach used in
14453// `QuantizeAndDequantize{V2|V3}`.
14454//
14455// If the mode is `SCALED`, we do not use the full range of the output type,
14456// choosing to elide the lowest possible value for symmetry (e.g., output range is
14457// -127 to 127, not -128 to 127 for signed 8 bit quantization), so that 0.0 maps to
14458// 0.
14459//
14460// We first find the range of values in our tensor. The
14461// range we use is always centered on 0, so we find m such that
14462//
14463// ```c++
14464//   m = max(abs(input_min), abs(input_max))
14465// ```
14466//
14467// Our input tensor range is then `[-m, m]`.
14468//
14469// Next, we choose our fixed-point quantization buckets, `[min_fixed, max_fixed]`.
14470// If T is signed, this is
14471//
14472// ```
14473//   num_bits = sizeof(T) * 8
14474//   [min_fixed, max_fixed] =
14475//       [-((1 << (num_bits - 1)) - 1), (1 << (num_bits - 1)) - 1]
14476// ```
14477//
14478// Otherwise, if T is unsigned, the fixed-point range is
14479//
14480// ```
14481//   [min_fixed, max_fixed] = [0, (1 << num_bits) - 1]
14482// ```
14483//
14484// From this we compute our scaling factor, s:
14485//
14486// ```c++
14487//   s = (max_fixed - min_fixed) / (2 * m)
14488// ```
14489//
14490// Now we can quantize the elements of our tensor:
14491//
14492// ```c++
14493// result = round(input * s)
14494// ```
14495//
14496// One thing to watch out for is that the operator may choose to adjust the
14497// requested minimum and maximum values slightly during the quantization process,
14498// so you should always use the output ports as the range for further calculations.
14499// For example, if the requested minimum and maximum values are close to equal,
14500// they will be separated by a small epsilon value to prevent ill-formed quantized
14501// buffers from being created. Otherwise, you can end up with buffers where all the
14502// quantized values map to the same float value, which causes problems for
14503// operations that have to perform further calculations on them.
14504//
14505// Arguments:
14506//
14507//	min_range: The minimum scalar value possibly produced for the input.
14508//	max_range: The maximum scalar value possibly produced for the input.
14509//
14510//
14511// Returns the quantized data produced from the float input, the actual minimum scalar value used for the output, and the actual maximum scalar value used for the output.
14512func QuantizeV2(scope *Scope, input tf.Output, min_range tf.Output, max_range tf.Output, T tf.DataType, optional ...QuantizeV2Attr) (output tf.Output, output_min tf.Output, output_max tf.Output) {
14513	if scope.Err() != nil {
14514		return
14515	}
14516	attrs := map[string]interface{}{"T": T}
14517	for _, a := range optional {
14518		a(attrs)
14519	}
14520	opspec := tf.OpSpec{
14521		Type: "QuantizeV2",
14522		Input: []tf.Input{
14523			input, min_range, max_range,
14524		},
14525		Attrs: attrs,
14526	}
14527	op := scope.AddOperation(opspec)
14528	return op.Output(0), op.Output(1), op.Output(2)
14529}
14530
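// Example (editorial sketch, not machine generated): MIN_COMBINED quantization
// over the assumed range [0.0, 6.0] to quint8, so 3.0 maps to
// round(3.0 * 255 / 6) = 128.
//
//	s := op.NewScope()
//	in := op.Const(s, []float32{0, 3, 6})
//	minR := op.Const(s, float32(0))
//	maxR := op.Const(s, float32(6))
//	q, outMin, outMax := op.QuantizeV2(s, in, minR, maxR, tf.Quint8,
//		op.QuantizeV2Mode("MIN_COMBINED"))
//
// Note that outMin/outMax, not minR/maxR, should feed later dequantization,
// since the op may adjust the requested range slightly.
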
14531// HistogramFixedWidthAttr is an optional argument to HistogramFixedWidth.
14532type HistogramFixedWidthAttr func(optionalAttr)
14533
14534// HistogramFixedWidthDtype sets the optional dtype attribute to value.
14535// If not specified, defaults to DT_INT32
14536func HistogramFixedWidthDtype(value tf.DataType) HistogramFixedWidthAttr {
14537	return func(m optionalAttr) {
14538		m["dtype"] = value
14539	}
14540}
14541
14542// Return histogram of values.
14543//
14544// Given the tensor `values`, this operation returns a rank 1 histogram counting
14545// the number of entries in `values` that fall into every bin.  The bins are
14546// equal width and determined by the arguments `value_range` and `nbins`.
14547//
14548// ```python
14549// # Bins will be:  (-inf, 1), [1, 2), [2, 3), [3, 4), [4, inf)
14550// nbins = 5
14551// value_range = [0.0, 5.0]
14552// new_values = [-1.0, 0.0, 1.5, 2.0, 5.0, 15]
14553//
14554// with tf.get_default_session() as sess:
14555//   hist = tf.histogram_fixed_width(new_values, value_range, nbins=5)
14556//   variables.global_variables_initializer().run()
14557//   sess.run(hist) => [2, 1, 1, 0, 2]
14558// ```
14559//
14560// Arguments:
14561//	values: Numeric `Tensor`.
14562//	value_range: Shape [2] `Tensor` of same `dtype` as `values`.
14563// values <= value_range[0] will be mapped to hist[0],
14564// values >= value_range[1] will be mapped to hist[-1].
14565//	nbins: Scalar `int32 Tensor`.  Number of histogram bins.
14566//
14567// Returns A 1-D `Tensor` holding histogram of values.
14568func HistogramFixedWidth(scope *Scope, values tf.Output, value_range tf.Output, nbins tf.Output, optional ...HistogramFixedWidthAttr) (out tf.Output) {
14569	if scope.Err() != nil {
14570		return
14571	}
14572	attrs := map[string]interface{}{}
14573	for _, a := range optional {
14574		a(attrs)
14575	}
14576	opspec := tf.OpSpec{
14577		Type: "HistogramFixedWidth",
14578		Input: []tf.Input{
14579			values, value_range, nbins,
14580		},
14581		Attrs: attrs,
14582	}
14583	op := scope.AddOperation(opspec)
14584	return op.Output(0)
14585}
14586
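// Example (editorial sketch, not machine generated): the Go equivalent of the
// Python snippet above; running the graph should yield [2, 1, 1, 0, 2].
//
//	s := op.NewScope()
//	vals := op.Const(s, []float32{-1.0, 0.0, 1.5, 2.0, 5.0, 15})
//	valueRange := op.Const(s, []float32{0.0, 5.0})
//	nbins := op.Const(s, int32(5))
//	hist := op.HistogramFixedWidth(s, vals, valueRange, nbins)
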
14587// Serializes the tree handle to a proto
14588//
14589// Arguments:
14590//	tree_handle: Handle to the tree resource to be serialized.
14591//
14592// Returns Serialized proto string of the tree resource.
14593func TensorForestTreeSerialize(scope *Scope, tree_handle tf.Output) (tree_config tf.Output) {
14594	if scope.Err() != nil {
14595		return
14596	}
14597	opspec := tf.OpSpec{
14598		Type: "TensorForestTreeSerialize",
14599		Input: []tf.Input{
14600			tree_handle,
14601		},
14602	}
14603	op := scope.AddOperation(opspec)
14604	return op.Output(0)
14605}
14606
14607// SparseMatMulAttr is an optional argument to SparseMatMul.
14608type SparseMatMulAttr func(optionalAttr)
14609
14610// SparseMatMulTransposeA sets the optional transpose_a attribute to value.
14611// If not specified, defaults to false
14612func SparseMatMulTransposeA(value bool) SparseMatMulAttr {
14613	return func(m optionalAttr) {
14614		m["transpose_a"] = value
14615	}
14616}
14617
14618// SparseMatMulTransposeB sets the optional transpose_b attribute to value.
14619// If not specified, defaults to false
14620func SparseMatMulTransposeB(value bool) SparseMatMulAttr {
14621	return func(m optionalAttr) {
14622		m["transpose_b"] = value
14623	}
14624}
14625
14626// SparseMatMulAIsSparse sets the optional a_is_sparse attribute to value.
14627// If not specified, defaults to false
14628func SparseMatMulAIsSparse(value bool) SparseMatMulAttr {
14629	return func(m optionalAttr) {
14630		m["a_is_sparse"] = value
14631	}
14632}
14633
14634// SparseMatMulBIsSparse sets the optional b_is_sparse attribute to value.
14635// If not specified, defaults to false
14636func SparseMatMulBIsSparse(value bool) SparseMatMulAttr {
14637	return func(m optionalAttr) {
14638		m["b_is_sparse"] = value
14639	}
14640}
14641
14642// Multiply matrix "a" by matrix "b".
14643//
14644// The inputs must be two-dimensional matrices and the inner dimension of "a" must
14645// match the outer dimension of "b". Both "a" and "b" must be `Tensor`s not
14646// `SparseTensor`s.  This op is optimized for the case where at least one of "a" or
14647// "b" is sparse, in the sense that they have a large proportion of zero values.
14648// The breakeven for using this versus a dense matrix multiply on one platform was
14649// 30% zero values in the sparse matrix.
14650//
14651// The gradient computation of this operation will only take advantage of sparsity
14652// in the input gradient when that gradient comes from a Relu.
14653func SparseMatMul(scope *Scope, a tf.Output, b tf.Output, optional ...SparseMatMulAttr) (product tf.Output) {
14654	if scope.Err() != nil {
14655		return
14656	}
14657	attrs := map[string]interface{}{}
14658	for _, a := range optional {
14659		a(attrs)
14660	}
14661	opspec := tf.OpSpec{
14662		Type: "SparseMatMul",
14663		Input: []tf.Input{
14664			a, b,
14665		},
14666		Attrs: attrs,
14667	}
14668	op := scope.AddOperation(opspec)
14669	return op.Output(0)
14670}
14671
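// Example (editorial sketch, not machine generated): hinting that "a" is
// mostly zeros so the sparsity-aware kernel is chosen.
//
//	s := op.NewScope()
//	a := op.Placeholder(s, tf.Float) // dense matrix with a large share of zeros
//	b := op.Placeholder(s, tf.Float)
//	product := op.SparseMatMul(s, a, b, op.SparseMatMulAIsSparse(true))
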
14672// ExperimentalThreadPoolHandleAttr is an optional argument to ExperimentalThreadPoolHandle.
14673type ExperimentalThreadPoolHandleAttr func(optionalAttr)
14674
14675// ExperimentalThreadPoolHandleMaxIntraOpParallelism sets the optional max_intra_op_parallelism attribute to value.
14676//
14677// value: The maximum degree of parallelism to use within operations that execute on this
14678// threadpool.
14679// If not specified, defaults to 1
14680func ExperimentalThreadPoolHandleMaxIntraOpParallelism(value int64) ExperimentalThreadPoolHandleAttr {
14681	return func(m optionalAttr) {
14682		m["max_intra_op_parallelism"] = value
14683	}
14684}
14685
14686// ExperimentalThreadPoolHandleContainer sets the optional container attribute to value.
14687// If not specified, defaults to ""
14688func ExperimentalThreadPoolHandleContainer(value string) ExperimentalThreadPoolHandleAttr {
14689	return func(m optionalAttr) {
14690		m["container"] = value
14691	}
14692}
14693
14694// ExperimentalThreadPoolHandleSharedName sets the optional shared_name attribute to value.
14695// If not specified, defaults to ""
14696func ExperimentalThreadPoolHandleSharedName(value string) ExperimentalThreadPoolHandleAttr {
14697	return func(m optionalAttr) {
14698		m["shared_name"] = value
14699	}
14700}
14701
14702// Creates a dataset that uses a custom thread pool to compute `input_dataset`.
14703//
14704// Arguments:
14705//	num_threads: The number of threads in the thread pool.
14706//	display_name: A human-readable name for the threads that may be visible in some
14707// visualizations.
14709//
14710// Returns A resource that can be consumed by one or more ExperimentalThreadPoolDataset
14711// ops.
14712func ExperimentalThreadPoolHandle(scope *Scope, num_threads int64, display_name string, optional ...ExperimentalThreadPoolHandleAttr) (handle tf.Output) {
14713	if scope.Err() != nil {
14714		return
14715	}
14716	attrs := map[string]interface{}{"num_threads": num_threads, "display_name": display_name}
14717	for _, a := range optional {
14718		a(attrs)
14719	}
14720	opspec := tf.OpSpec{
14721		Type: "ExperimentalThreadPoolHandle",
14722
14723		Attrs: attrs,
14724	}
14725	op := scope.AddOperation(opspec)
14726	return op.Output(0)
14727}
14728
14729// LoadTPUEmbeddingProximalAdagradParametersGradAccumDebugAttr is an optional argument to LoadTPUEmbeddingProximalAdagradParametersGradAccumDebug.
14730type LoadTPUEmbeddingProximalAdagradParametersGradAccumDebugAttr func(optionalAttr)
14731
// LoadTPUEmbeddingProximalAdagradParametersGradAccumDebugTableId sets the optional table_id attribute to value.
// If not specified, defaults to -1
//
// REQUIRES: value >= -1
func LoadTPUEmbeddingProximalAdagradParametersGradAccumDebugTableId(value int64) LoadTPUEmbeddingProximalAdagradParametersGradAccumDebugAttr {
	return func(m optionalAttr) {
		m["table_id"] = value
	}
}

// LoadTPUEmbeddingProximalAdagradParametersGradAccumDebugTableName sets the optional table_name attribute to value.
// If not specified, defaults to ""
func LoadTPUEmbeddingProximalAdagradParametersGradAccumDebugTableName(value string) LoadTPUEmbeddingProximalAdagradParametersGradAccumDebugAttr {
	return func(m optionalAttr) {
		m["table_name"] = value
	}
}

// Load proximal Adagrad embedding parameters with debug support.
//
// An op that loads optimization parameters into HBM for embedding. Must be
// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
// embedding table configuration. For example, this op is used to install
// parameters that are loaded from a checkpoint before a training loop is
// executed.
//
// Arguments:
//	parameters: Value of parameters used in the proximal Adagrad optimization algorithm.
//	accumulators: Value of accumulators used in the proximal Adagrad optimization algorithm.
//	gradient_accumulators: Value of gradient_accumulators used in the proximal Adagrad optimization algorithm.
//
//
//
// Returns the created operation.
func LoadTPUEmbeddingProximalAdagradParametersGradAccumDebug(scope *Scope, parameters tf.Output, accumulators tf.Output, gradient_accumulators tf.Output, num_shards int64, shard_id int64, optional ...LoadTPUEmbeddingProximalAdagradParametersGradAccumDebugAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "LoadTPUEmbeddingProximalAdagradParametersGradAccumDebug",
		Input: []tf.Input{
			parameters, accumulators, gradient_accumulators,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}

// LoadTPUEmbeddingProximalAdagradParametersAttr is an optional argument to LoadTPUEmbeddingProximalAdagradParameters.
type LoadTPUEmbeddingProximalAdagradParametersAttr func(optionalAttr)

// LoadTPUEmbeddingProximalAdagradParametersTableId sets the optional table_id attribute to value.
// If not specified, defaults to -1
//
// REQUIRES: value >= -1
func LoadTPUEmbeddingProximalAdagradParametersTableId(value int64) LoadTPUEmbeddingProximalAdagradParametersAttr {
	return func(m optionalAttr) {
		m["table_id"] = value
	}
}

// LoadTPUEmbeddingProximalAdagradParametersTableName sets the optional table_name attribute to value.
// If not specified, defaults to ""
func LoadTPUEmbeddingProximalAdagradParametersTableName(value string) LoadTPUEmbeddingProximalAdagradParametersAttr {
	return func(m optionalAttr) {
		m["table_name"] = value
	}
}

// Load proximal Adagrad embedding parameters.
//
// An op that loads optimization parameters into HBM for embedding. Must be
// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
// embedding table configuration. For example, this op is used to install
// parameters that are loaded from a checkpoint before a training loop is
// executed.
//
// Arguments:
//	parameters: Value of parameters used in the proximal Adagrad optimization algorithm.
//	accumulators: Value of accumulators used in the proximal Adagrad optimization algorithm.
//
//
//
// Returns the created operation.
func LoadTPUEmbeddingProximalAdagradParameters(scope *Scope, parameters tf.Output, accumulators tf.Output, num_shards int64, shard_id int64, optional ...LoadTPUEmbeddingProximalAdagradParametersAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "LoadTPUEmbeddingProximalAdagradParameters",
		Input: []tf.Input{
			parameters, accumulators,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}
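
// The following is an editorial usage sketch, not generated code: it shows how
// a loader like the one above might be wired into a graph with this package's
// Scope and Const helpers and run via the core tensorflow/go Session API. The
// table shape, shard layout, and table name are assumptions chosen for
// illustration, and actually executing the op requires a configured TPU
// embedding runtime.
//
//	package main
//
//	import (
//		"fmt"
//
//		tf "github.com/tensorflow/tensorflow/tensorflow/go"
//		"github.com/tensorflow/tensorflow/tensorflow/go/op"
//	)
//
//	func main() {
//		s := op.NewScope()
//		// A hypothetical 2x4 embedding table and matching Adagrad
//		// accumulators, e.g. values restored from a checkpoint.
//		params := op.Const(s, [][]float32{{0.1, 0.2, 0.3, 0.4}, {0.5, 0.6, 0.7, 0.8}})
//		accums := op.Const(s, [][]float32{{1, 1, 1, 1}, {1, 1, 1, 1}})
//		load := op.LoadTPUEmbeddingProximalAdagradParameters(s, params, accums,
//			1 /* num_shards */, 0, /* shard_id */
//			op.LoadTPUEmbeddingProximalAdagradParametersTableName("my_table"))
//		graph, err := s.Finalize()
//		if err != nil {
//			panic(err)
//		}
//		sess, err := tf.NewSession(graph, nil)
//		if err != nil {
//			panic(err)
//		}
//		defer sess.Close()
//		// Running the op as a target installs the parameters into HBM.
//		if _, err := sess.Run(nil, nil, []*tf.Operation{load}); err != nil {
//			fmt.Println("expected to fail without a TPU runtime:", err)
//		}
//	}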

// Get the current size of the TensorArray.
//
// Arguments:
//	handle: The handle to a TensorArray (output of TensorArray or TensorArrayGrad).
//	flow_in: A float scalar that enforces proper chaining of operations.
//
// Returns The current size of the TensorArray.
func TensorArraySizeV3(scope *Scope, handle tf.Output, flow_in tf.Output) (size tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "TensorArraySizeV3",
		Input: []tf.Input{
			handle, flow_in,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Computes gradients for the scaled exponential linear (Selu) operation.
//
// Arguments:
//	gradients: The backpropagated gradients to the corresponding Selu operation.
//	outputs: The outputs of the corresponding Selu operation.
//
// Returns The gradients: `gradients * (outputs + scale * alpha)`
// if outputs < 0, `scale * gradients` otherwise.
func SeluGrad(scope *Scope, gradients tf.Output, outputs tf.Output) (backprops tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "SeluGrad",
		Input: []tf.Input{
			gradients, outputs,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// ResourceSparseApplyFtrlV2Attr is an optional argument to ResourceSparseApplyFtrlV2.
type ResourceSparseApplyFtrlV2Attr func(optionalAttr)

// ResourceSparseApplyFtrlV2UseLocking sets the optional use_locking attribute to value.
//
// value: If `True`, updating of the var and accum tensors will be protected
// by a lock; otherwise the behavior is undefined, but may exhibit less
// contention.
// If not specified, defaults to false
func ResourceSparseApplyFtrlV2UseLocking(value bool) ResourceSparseApplyFtrlV2Attr {
	return func(m optionalAttr) {
		m["use_locking"] = value
	}
}

// Update relevant entries in '*var' according to the Ftrl-proximal scheme.
//
// That is, for the rows for which we have grad, we update var, accum, and
// linear as follows:
// grad_with_shrinkage = grad + 2 * l2_shrinkage * var
// accum_new = accum + grad_with_shrinkage * grad_with_shrinkage
// linear += grad_with_shrinkage +
//     (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
// quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
// var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
// accum = accum_new
//
// Arguments:
//	var_: Should be from a Variable().
//	accum: Should be from a Variable().
//	linear: Should be from a Variable().
//	grad: The gradient.
//	indices: A vector of indices into the first dimension of var and accum.
//	lr: Scaling factor. Must be a scalar.
//	l1: L1 regularization. Must be a scalar.
//	l2: L2 shrinkage regularization. Must be a scalar.
//
//	lr_power: Scaling factor. Must be a scalar.
//
// Returns the created operation.
func ResourceSparseApplyFtrlV2(scope *Scope, var_ tf.Output, accum tf.Output, linear tf.Output, grad tf.Output, indices tf.Output, lr tf.Output, l1 tf.Output, l2 tf.Output, l2_shrinkage tf.Output, lr_power tf.Output, optional ...ResourceSparseApplyFtrlV2Attr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ResourceSparseApplyFtrlV2",
		Input: []tf.Input{
			var_, accum, linear, grad, indices, lr, l1, l2, l2_shrinkage, lr_power,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}
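
// To make the update above concrete, here is a plain-Go transcription of the
// per-coordinate math (an editorial sketch with made-up scalar inputs, not a
// call into TensorFlow; the op itself applies this per selected row of var).
//
//	package main
//
//	import (
//		"fmt"
//		"math"
//	)
//
//	func main() {
//		v, accum, linear := 0.5, 1.0, 0.0
//		grad, lr, l1, l2, l2Shrinkage, lrPower := 0.2, 0.1, 0.01, 0.01, 0.001, -0.5
//
//		gws := grad + 2*l2Shrinkage*v // grad_with_shrinkage
//		accumNew := accum + gws*gws
//		linear += gws + (math.Pow(accumNew, -lrPower)-math.Pow(accum, -lrPower))/lr*v
//		quadratic := 1.0/(math.Pow(accumNew, lrPower)*lr) + 2*l2
//		if math.Abs(linear) > l1 {
//			sign := 1.0
//			if linear < 0 {
//				sign = -1.0
//			}
//			v = (sign*l1 - linear) / quadratic
//		} else {
//			v = 0.0
//		}
//		accum = accumNew
//		fmt.Println("var:", v, "accum:", accum, "linear:", linear)
//	}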

// SumAttr is an optional argument to Sum.
type SumAttr func(optionalAttr)

// SumKeepDims sets the optional keep_dims attribute to value.
//
// value: If true, retain reduced dimensions with length 1.
// If not specified, defaults to false
func SumKeepDims(value bool) SumAttr {
	return func(m optionalAttr) {
		m["keep_dims"] = value
	}
}

// Computes the sum of elements across dimensions of a tensor.
//
// Reduces `input` along the dimensions given in `axis`. Unless
// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
// `axis`. If `keep_dims` is true, the reduced dimensions are
// retained with length 1.
//
// Arguments:
//	input: The tensor to reduce.
//	axis: The dimensions to reduce. Must be in the range
// `[-rank(input), rank(input))`.
//
// Returns The reduced tensor.
func Sum(scope *Scope, input tf.Output, axis tf.Output, optional ...SumAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "Sum",
		Input: []tf.Input{
			input, axis,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
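
// A minimal editorial sketch of Sum with and without keep_dims, assuming the
// same imports as the earlier example (tf and op) plus "fmt"; the input values
// are made up for illustration.
//
//	s := op.NewScope()
//	x := op.Const(s, [][]float32{{1, 2, 3}, {4, 5, 6}})
//	axis := op.Const(s, []int32{1})
//	total := op.Sum(s, x, axis)                      // shape [2]: [6 15]
//	kept := op.Sum(s, x, axis, op.SumKeepDims(true)) // shape [2 1]: [[6] [15]]
//	graph, _ := s.Finalize()
//	sess, _ := tf.NewSession(graph, nil)
//	defer sess.Close()
//	out, _ := sess.Run(nil, []tf.Output{total, kept}, nil)
//	fmt.Println(out[0].Value(), out[1].Value())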

// SparseToSparseSetOperationAttr is an optional argument to SparseToSparseSetOperation.
type SparseToSparseSetOperationAttr func(optionalAttr)

// SparseToSparseSetOperationValidateIndices sets the optional validate_indices attribute to value.
// If not specified, defaults to true
func SparseToSparseSetOperationValidateIndices(value bool) SparseToSparseSetOperationAttr {
	return func(m optionalAttr) {
		m["validate_indices"] = value
	}
}

// Applies set operation along last dimension of 2 `SparseTensor` inputs.
//
// See SetOperationOp::SetOperationFromContext for values of `set_operation`.
//
// If `validate_indices` is `True`, `SparseToSparseSetOperation` validates the
// order and range of `set1` and `set2` indices.
//
// Input `set1` is a `SparseTensor` represented by `set1_indices`, `set1_values`,
// and `set1_shape`. For `set1` ranked `n`, the first `n-1` dimensions must be
// the same as for `set2`. Dimension `n` contains values in a set; duplicates
// are allowed but ignored.
//
// Input `set2` is a `SparseTensor` represented by `set2_indices`, `set2_values`,
// and `set2_shape`. For `set2` ranked `n`, the first `n-1` dimensions must be
// the same as for `set1`. Dimension `n` contains values in a set; duplicates
// are allowed but ignored.
//
// Output `result` is a `SparseTensor` represented by `result_indices`,
// `result_values`, and `result_shape`. For `set1` and `set2` ranked `n`, this
// has rank `n` and the same first `n-1` dimensions as `set1` and `set2`. The
// `nth` dimension contains the result of `set_operation` applied to the
// corresponding `[0...n-1]` dimension of `set`.
//
// Arguments:
//	set1_indices: 2D `Tensor`, indices of a `SparseTensor`. Must be in row-major
// order.
//	set1_values: 1D `Tensor`, values of a `SparseTensor`. Must be in row-major
// order.
//	set1_shape: 1D `Tensor`, shape of a `SparseTensor`. `set1_shape[0...n-1]` must
// be the same as `set2_shape[0...n-1]`, `set1_shape[n]` is the
// max set size across `0...n-1` dimensions.
//	set2_indices: 2D `Tensor`, indices of a `SparseTensor`. Must be in row-major
// order.
//	set2_values: 1D `Tensor`, values of a `SparseTensor`. Must be in row-major
// order.
//	set2_shape: 1D `Tensor`, shape of a `SparseTensor`. `set2_shape[0...n-1]` must
// be the same as `set1_shape[0...n-1]`, `set2_shape[n]` is the
// max set size across `0...n-1` dimensions.
//
//
// Returns the 2D indices of the result `SparseTensor`, the 1D values of the
// result `SparseTensor`, and the 1D `Tensor` shape of the result
// `SparseTensor`. `result_shape[0...n-1]` is the same as the first `n-1`
// dimensions of `set1` and `set2`; `result_shape[n]` is the max result set
// size across all `0...n-1` dimensions.
func SparseToSparseSetOperation(scope *Scope, set1_indices tf.Output, set1_values tf.Output, set1_shape tf.Output, set2_indices tf.Output, set2_values tf.Output, set2_shape tf.Output, set_operation string, optional ...SparseToSparseSetOperationAttr) (result_indices tf.Output, result_values tf.Output, result_shape tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"set_operation": set_operation}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "SparseToSparseSetOperation",
		Input: []tf.Input{
			set1_indices, set1_values, set1_shape, set2_indices, set2_values, set2_shape,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}

// Computes softmax cross entropy cost and gradients to backpropagate.
//
// Unlike `SoftmaxCrossEntropyWithLogits`, this operation does not accept
// a matrix of label probabilities, but rather a single label per row
// of features.  This label is considered to have probability 1.0 for the
// given row.
//
// Inputs are the logits, not probabilities.
//
// Arguments:
//	features: batch_size x num_classes matrix
//	labels: batch_size vector with values in [0, num_classes).
// This is the label for the given minibatch entry.
//
// Returns the per-example loss (a batch_size vector) and the backpropagated
// gradients (a batch_size x num_classes matrix).
func SparseSoftmaxCrossEntropyWithLogits(scope *Scope, features tf.Output, labels tf.Output) (loss tf.Output, backprop tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "SparseSoftmaxCrossEntropyWithLogits",
		Input: []tf.Input{
			features, labels,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1)
}
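
// An editorial sketch (made-up logits and labels, imports as in the first
// example): two examples over three classes, labels given as class indices.
//
//	s := op.NewScope()
//	logits := op.Const(s, [][]float32{{2, 1, 0}, {0, 1, 2}})
//	labels := op.Const(s, []int64{0, 2}) // one class index per row
//	loss, backprop := op.SparseSoftmaxCrossEntropyWithLogits(s, logits, labels)
//	graph, _ := s.Finalize()
//	sess, _ := tf.NewSession(graph, nil)
//	defer sess.Close()
//	out, _ := sess.Run(nil, []tf.Output{loss, backprop}, nil)
//	fmt.Println(out[0].Value()) // per-example losses, shape [2]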

// StridedSliceGradAttr is an optional argument to StridedSliceGrad.
type StridedSliceGradAttr func(optionalAttr)

// StridedSliceGradBeginMask sets the optional begin_mask attribute to value.
// If not specified, defaults to 0
func StridedSliceGradBeginMask(value int64) StridedSliceGradAttr {
	return func(m optionalAttr) {
		m["begin_mask"] = value
	}
}

// StridedSliceGradEndMask sets the optional end_mask attribute to value.
// If not specified, defaults to 0
func StridedSliceGradEndMask(value int64) StridedSliceGradAttr {
	return func(m optionalAttr) {
		m["end_mask"] = value
	}
}

// StridedSliceGradEllipsisMask sets the optional ellipsis_mask attribute to value.
// If not specified, defaults to 0
func StridedSliceGradEllipsisMask(value int64) StridedSliceGradAttr {
	return func(m optionalAttr) {
		m["ellipsis_mask"] = value
	}
}

// StridedSliceGradNewAxisMask sets the optional new_axis_mask attribute to value.
// If not specified, defaults to 0
func StridedSliceGradNewAxisMask(value int64) StridedSliceGradAttr {
	return func(m optionalAttr) {
		m["new_axis_mask"] = value
	}
}

// StridedSliceGradShrinkAxisMask sets the optional shrink_axis_mask attribute to value.
// If not specified, defaults to 0
func StridedSliceGradShrinkAxisMask(value int64) StridedSliceGradAttr {
	return func(m optionalAttr) {
		m["shrink_axis_mask"] = value
	}
}

// Returns the gradient of `StridedSlice`.
//
// Since `StridedSlice` cuts out pieces of its `input` which is of size
// `shape`, its gradient will have the same shape (which is passed here
// as `shape`). The gradient will be zero in any element that the slice
// does not select.
//
// Arguments are the same as for `StridedSlice`, with the exception that
// `dy` is the input gradient to be propagated and `shape` is the
// shape of `StridedSlice`'s `input`.
func StridedSliceGrad(scope *Scope, shape tf.Output, begin tf.Output, end tf.Output, strides tf.Output, dy tf.Output, optional ...StridedSliceGradAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "StridedSliceGrad",
		Input: []tf.Input{
			shape, begin, end, strides, dy,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// LoadTPUEmbeddingRMSPropParametersAttr is an optional argument to LoadTPUEmbeddingRMSPropParameters.
type LoadTPUEmbeddingRMSPropParametersAttr func(optionalAttr)

// LoadTPUEmbeddingRMSPropParametersTableId sets the optional table_id attribute to value.
// If not specified, defaults to -1
//
// REQUIRES: value >= -1
func LoadTPUEmbeddingRMSPropParametersTableId(value int64) LoadTPUEmbeddingRMSPropParametersAttr {
	return func(m optionalAttr) {
		m["table_id"] = value
	}
}

// LoadTPUEmbeddingRMSPropParametersTableName sets the optional table_name attribute to value.
// If not specified, defaults to ""
func LoadTPUEmbeddingRMSPropParametersTableName(value string) LoadTPUEmbeddingRMSPropParametersAttr {
	return func(m optionalAttr) {
		m["table_name"] = value
	}
}

// Load RMSProp embedding parameters.
//
// An op that loads optimization parameters into HBM for embedding. Must be
// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
// embedding table configuration. For example, this op is used to install
// parameters that are loaded from a checkpoint before a training loop is
// executed.
//
// Arguments:
//	parameters: Value of parameters used in the RMSProp optimization algorithm.
//	ms: Value of ms used in the RMSProp optimization algorithm.
//	mom: Value of mom used in the RMSProp optimization algorithm.
//
//
//
// Returns the created operation.
func LoadTPUEmbeddingRMSPropParameters(scope *Scope, parameters tf.Output, ms tf.Output, mom tf.Output, num_shards int64, shard_id int64, optional ...LoadTPUEmbeddingRMSPropParametersAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "LoadTPUEmbeddingRMSPropParameters",
		Input: []tf.Input{
			parameters, ms, mom,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}

// Computes the gradient for the inverse of `x` w.r.t. its input.
//
// Specifically, `grad = -dy * y*y`, where `y = 1/x`, and `dy`
// is the corresponding input gradient.
func ReciprocalGrad(scope *Scope, y tf.Output, dy tf.Output) (z tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "ReciprocalGrad",
		Input: []tf.Input{
			y, dy,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
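
// A quick plain-Go check of the formula (editorial, values made up): at x = 2
// we have y = 1/x = 0.5, and with an incoming gradient dy = 1 the result is
// -dy*y*y = -0.25, which matches d(1/x)/dx = -1/x^2 evaluated at x = 2.
//
//	y, dy := 0.5, 1.0
//	grad := -dy * y * y
//	fmt.Println(grad) // -0.25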

// EuclideanNormAttr is an optional argument to EuclideanNorm.
type EuclideanNormAttr func(optionalAttr)

// EuclideanNormKeepDims sets the optional keep_dims attribute to value.
//
// value: If true, retain reduced dimensions with length 1.
// If not specified, defaults to false
func EuclideanNormKeepDims(value bool) EuclideanNormAttr {
	return func(m optionalAttr) {
		m["keep_dims"] = value
	}
}

// Computes the Euclidean norm of elements across dimensions of a tensor.
//
// Reduces `input` along the dimensions given in `axis`. Unless
// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
// `axis`. If `keep_dims` is true, the reduced dimensions are
// retained with length 1.
//
// Arguments:
//	input: The tensor to reduce.
//	axis: The dimensions to reduce. Must be in the range
// `[-rank(input), rank(input))`.
//
// Returns The reduced tensor.
func EuclideanNorm(scope *Scope, input tf.Output, axis tf.Output, optional ...EuclideanNormAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "EuclideanNorm",
		Input: []tf.Input{
			input, axis,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
15277
15278// Returns the element-wise min of two SparseTensors.
15279//
15280// Assumes the two SparseTensors have the same shape, i.e., no broadcasting.
15281//
15282// Arguments:
15283//	a_indices: 2-D.  `N x R` matrix with the indices of non-empty values in a
15284// SparseTensor, in the canonical lexicographic ordering.
15285//	a_values: 1-D.  `N` non-empty values corresponding to `a_indices`.
15286//	a_shape: 1-D.  Shape of the input SparseTensor.
15287//	b_indices: counterpart to `a_indices` for the other operand.
15288//	b_values: counterpart to `a_values` for the other operand; must be of the same dtype.
15289//	b_shape: counterpart to `a_shape` for the other operand; the two shapes must be equal.
15290//
15291// Returns 2-D.  The indices of the output SparseTensor.1-D.  The values of the output SparseTensor.
15292func SparseSparseMinimum(scope *Scope, a_indices tf.Output, a_values tf.Output, a_shape tf.Output, b_indices tf.Output, b_values tf.Output, b_shape tf.Output) (output_indices tf.Output, output_values tf.Output) {
15293	if scope.Err() != nil {
15294		return
15295	}
15296	opspec := tf.OpSpec{
15297		Type: "SparseSparseMinimum",
15298		Input: []tf.Input{
15299			a_indices, a_values, a_shape, b_indices, b_values, b_shape,
15300		},
15301	}
15302	op := scope.AddOperation(opspec)
15303	return op.Output(0), op.Output(1)
15304}

// ResourceSparseApplyAdagradDAAttr is an optional argument to ResourceSparseApplyAdagradDA.
type ResourceSparseApplyAdagradDAAttr func(optionalAttr)

// ResourceSparseApplyAdagradDAUseLocking sets the optional use_locking attribute to value.
//
// value: If True, updating of the var and accum tensors will be protected by
// a lock; otherwise the behavior is undefined, but may exhibit less contention.
// If not specified, defaults to false
func ResourceSparseApplyAdagradDAUseLocking(value bool) ResourceSparseApplyAdagradDAAttr {
	return func(m optionalAttr) {
		m["use_locking"] = value
	}
}

// Update entries in '*var' and '*accum' according to the proximal Adagrad scheme.
//
// Arguments:
//	var_: Should be from a Variable().
//	gradient_accumulator: Should be from a Variable().
//	gradient_squared_accumulator: Should be from a Variable().
//	grad: The gradient.
//	indices: A vector of indices into the first dimension of var and accum.
//	lr: Learning rate. Must be a scalar.
//	l1: L1 regularization. Must be a scalar.
//	l2: L2 regularization. Must be a scalar.
//	global_step: Training step number. Must be a scalar.
//
// Returns the created operation.
func ResourceSparseApplyAdagradDA(scope *Scope, var_ tf.Output, gradient_accumulator tf.Output, gradient_squared_accumulator tf.Output, grad tf.Output, indices tf.Output, lr tf.Output, l1 tf.Output, l2 tf.Output, global_step tf.Output, optional ...ResourceSparseApplyAdagradDAAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ResourceSparseApplyAdagradDA",
		Input: []tf.Input{
			var_, gradient_accumulator, gradient_squared_accumulator, grad, indices, lr, l1, l2, global_step,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}

// EncodeJpegAttr is an optional argument to EncodeJpeg.
type EncodeJpegAttr func(optionalAttr)

// EncodeJpegFormat sets the optional format attribute to value.
//
// value: Per pixel image format.
// If not specified, defaults to ""
func EncodeJpegFormat(value string) EncodeJpegAttr {
	return func(m optionalAttr) {
		m["format"] = value
	}
}

// EncodeJpegQuality sets the optional quality attribute to value.
//
// value: Quality of the compression from 0 to 100 (higher is better and slower).
// If not specified, defaults to 95
func EncodeJpegQuality(value int64) EncodeJpegAttr {
	return func(m optionalAttr) {
		m["quality"] = value
	}
}

// EncodeJpegProgressive sets the optional progressive attribute to value.
//
// value: If True, create a JPEG that loads progressively (coarse to fine).
// If not specified, defaults to false
func EncodeJpegProgressive(value bool) EncodeJpegAttr {
	return func(m optionalAttr) {
		m["progressive"] = value
	}
}

// EncodeJpegOptimizeSize sets the optional optimize_size attribute to value.
//
// value: If True, spend CPU/RAM to reduce size with no quality change.
// If not specified, defaults to false
func EncodeJpegOptimizeSize(value bool) EncodeJpegAttr {
	return func(m optionalAttr) {
		m["optimize_size"] = value
	}
}

// EncodeJpegChromaDownsampling sets the optional chroma_downsampling attribute to value.
//
// value: See http://en.wikipedia.org/wiki/Chroma_subsampling.
// If not specified, defaults to true
func EncodeJpegChromaDownsampling(value bool) EncodeJpegAttr {
	return func(m optionalAttr) {
		m["chroma_downsampling"] = value
	}
}

// EncodeJpegDensityUnit sets the optional density_unit attribute to value.
//
// value: Unit used to specify `x_density` and `y_density`:
// pixels per inch (`'in'`) or centimeter (`'cm'`).
// If not specified, defaults to "in"
func EncodeJpegDensityUnit(value string) EncodeJpegAttr {
	return func(m optionalAttr) {
		m["density_unit"] = value
	}
}

// EncodeJpegXDensity sets the optional x_density attribute to value.
//
// value: Horizontal pixels per density unit.
// If not specified, defaults to 300
func EncodeJpegXDensity(value int64) EncodeJpegAttr {
	return func(m optionalAttr) {
		m["x_density"] = value
	}
}

// EncodeJpegYDensity sets the optional y_density attribute to value.
//
// value: Vertical pixels per density unit.
// If not specified, defaults to 300
func EncodeJpegYDensity(value int64) EncodeJpegAttr {
	return func(m optionalAttr) {
		m["y_density"] = value
	}
}

// EncodeJpegXmpMetadata sets the optional xmp_metadata attribute to value.
//
// value: If not empty, embed this XMP metadata in the image header.
// If not specified, defaults to ""
func EncodeJpegXmpMetadata(value string) EncodeJpegAttr {
	return func(m optionalAttr) {
		m["xmp_metadata"] = value
	}
}

// JPEG-encode an image.
//
// `image` is a 3-D uint8 Tensor of shape `[height, width, channels]`.
//
// The attr `format` can be used to override the color format of the encoded
// output.  Values can be:
//
// *   `''`: Use a default format based on the number of channels in the image.
// *   `grayscale`: Output a grayscale JPEG image.  The `channels` dimension
//     of `image` must be 1.
// *   `rgb`: Output an RGB JPEG image. The `channels` dimension
//     of `image` must be 3.
//
// If `format` is not specified or is the empty string, a default format is
// chosen based on the number of channels in `image`:
//
// *   1: Output a grayscale image.
// *   3: Output an RGB image.
//
// Arguments:
//	image: 3-D with shape `[height, width, channels]`.
//
// Returns 0-D. JPEG-encoded image.
func EncodeJpeg(scope *Scope, image tf.Output, optional ...EncodeJpegAttr) (contents tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "EncodeJpeg",
		Input: []tf.Input{
			image,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
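
// An editorial sketch (imports as in the first example; the image data is made
// up): encode a tiny 2x2 RGB image with a non-default quality setting.
//
//	s := op.NewScope()
//	img := op.Const(s, [][][]uint8{
//		{{255, 0, 0}, {0, 255, 0}},
//		{{0, 0, 255}, {255, 255, 255}},
//	})
//	jpeg := op.EncodeJpeg(s, img,
//		op.EncodeJpegQuality(90),
//		op.EncodeJpegProgressive(true))
//	graph, _ := s.Finalize()
//	sess, _ := tf.NewSession(graph, nil)
//	defer sess.Close()
//	out, _ := sess.Run(nil, []tf.Output{jpeg}, nil)
//	fmt.Println(len(out[0].Value().(string)), "bytes of JPEG data")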

// MultinomialAttr is an optional argument to Multinomial.
type MultinomialAttr func(optionalAttr)

// MultinomialSeed sets the optional seed attribute to value.
//
// value: If either seed or seed2 is set to be non-zero, the internal random number
// generator is seeded by the given seed.  Otherwise, a random seed is used.
// If not specified, defaults to 0
func MultinomialSeed(value int64) MultinomialAttr {
	return func(m optionalAttr) {
		m["seed"] = value
	}
}

// MultinomialSeed2 sets the optional seed2 attribute to value.
//
// value: A second seed to avoid seed collision.
// If not specified, defaults to 0
func MultinomialSeed2(value int64) MultinomialAttr {
	return func(m optionalAttr) {
		m["seed2"] = value
	}
}

// MultinomialOutputDtype sets the optional output_dtype attribute to value.
// If not specified, defaults to DT_INT64
func MultinomialOutputDtype(value tf.DataType) MultinomialAttr {
	return func(m optionalAttr) {
		m["output_dtype"] = value
	}
}

// Draws samples from a multinomial distribution.
//
// Arguments:
//	logits: 2-D Tensor with shape `[batch_size, num_classes]`.  Each slice `[i, :]`
// represents the unnormalized log probabilities for all classes.
//	num_samples: 0-D.  Number of independent samples to draw for each row slice.
//
// Returns 2-D Tensor with shape `[batch_size, num_samples]`.  Each slice `[i, :]`
// contains the drawn class labels with range `[0, num_classes)`.
func Multinomial(scope *Scope, logits tf.Output, num_samples tf.Output, optional ...MultinomialAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "Multinomial",
		Input: []tf.Input{
			logits, num_samples,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
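
// An editorial sketch (imports as in the first example; logits made up):
// draw five class samples from one row of unnormalized log-probabilities,
// with a fixed seed for reproducibility.
//
//	s := op.NewScope()
//	logits := op.Const(s, [][]float32{{0, 1, 2, 3}})
//	n := op.Const(s, int32(5))
//	samples := op.Multinomial(s, logits, n, op.MultinomialSeed(42))
//	// After running, samples has shape [1 5] with int64 values in [0, 4).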

// RetrieveTPUEmbeddingRMSPropParametersAttr is an optional argument to RetrieveTPUEmbeddingRMSPropParameters.
type RetrieveTPUEmbeddingRMSPropParametersAttr func(optionalAttr)

// RetrieveTPUEmbeddingRMSPropParametersTableId sets the optional table_id attribute to value.
// If not specified, defaults to -1
//
// REQUIRES: value >= -1
func RetrieveTPUEmbeddingRMSPropParametersTableId(value int64) RetrieveTPUEmbeddingRMSPropParametersAttr {
	return func(m optionalAttr) {
		m["table_id"] = value
	}
}

// RetrieveTPUEmbeddingRMSPropParametersTableName sets the optional table_name attribute to value.
// If not specified, defaults to ""
func RetrieveTPUEmbeddingRMSPropParametersTableName(value string) RetrieveTPUEmbeddingRMSPropParametersAttr {
	return func(m optionalAttr) {
		m["table_name"] = value
	}
}

// Retrieve RMSProp embedding parameters.
//
// An op that retrieves optimization parameters from embedding HBM to host
// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
// the correct embedding table configuration. For example, this op is
// used to retrieve updated parameters before saving a checkpoint.
//
// Returns the parameters, ms, and mom values updated by the RMSProp
// optimization algorithm.
func RetrieveTPUEmbeddingRMSPropParameters(scope *Scope, num_shards int64, shard_id int64, optional ...RetrieveTPUEmbeddingRMSPropParametersAttr) (parameters tf.Output, ms tf.Output, mom tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "RetrieveTPUEmbeddingRMSPropParameters",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}

// QuantizedRelu6Attr is an optional argument to QuantizedRelu6.
type QuantizedRelu6Attr func(optionalAttr)

// QuantizedRelu6OutType sets the optional out_type attribute to value.
// If not specified, defaults to DT_QUINT8
func QuantizedRelu6OutType(value tf.DataType) QuantizedRelu6Attr {
	return func(m optionalAttr) {
		m["out_type"] = value
	}
}

// Computes Quantized Rectified Linear 6: `min(max(features, 0), 6)`
//
// Arguments:
//
//	min_features: The float value that the lowest quantized value represents.
//	max_features: The float value that the highest quantized value represents.
//
// Returns activations with the same output shape as "features", the float
// value that the lowest quantized value represents, and the float value that
// the highest quantized value represents.
func QuantizedRelu6(scope *Scope, features tf.Output, min_features tf.Output, max_features tf.Output, optional ...QuantizedRelu6Attr) (activations tf.Output, min_activations tf.Output, max_activations tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "QuantizedRelu6",
		Input: []tf.Input{
			features, min_features, max_features,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}

// BatchMatMulAttr is an optional argument to BatchMatMul.
type BatchMatMulAttr func(optionalAttr)

// BatchMatMulAdjX sets the optional adj_x attribute to value.
//
// value: If `True`, adjoint the slices of `x`. Defaults to `False`.
// If not specified, defaults to false
func BatchMatMulAdjX(value bool) BatchMatMulAttr {
	return func(m optionalAttr) {
		m["adj_x"] = value
	}
}

// BatchMatMulAdjY sets the optional adj_y attribute to value.
//
// value: If `True`, adjoint the slices of `y`. Defaults to `False`.
// If not specified, defaults to false
func BatchMatMulAdjY(value bool) BatchMatMulAttr {
	return func(m optionalAttr) {
		m["adj_y"] = value
	}
}

// Multiplies slices of two tensors in batches.
//
// Multiplies all slices of `Tensor` `x` and `y` (each slice can be
// viewed as an element of a batch), and arranges the individual results
// in a single output tensor of the same batch size. Each of the
// individual slices can optionally be adjointed (to adjoint a matrix
// means to transpose and conjugate it) before multiplication by setting
// the `adj_x` or `adj_y` flag to `True`; both default to `False`.
//
// The input tensors `x` and `y` are 2-D or higher with shape `[..., r_x, c_x]`
// and `[..., r_y, c_y]`.
//
// The output tensor is 2-D or higher with shape `[..., r_o, c_o]`, where:
//
//     r_o = c_x if adj_x else r_x
//     c_o = r_y if adj_y else c_y
//
// It is computed as:
//
//     output[..., :, :] = matrix(x[..., :, :]) * matrix(y[..., :, :])
//
// Arguments:
//	x: 2-D or higher with shape `[..., r_x, c_x]`.
//	y: 2-D or higher with shape `[..., r_y, c_y]`.
//
// Returns 2-D or higher with shape `[..., r_o, c_o]`
func BatchMatMul(scope *Scope, x tf.Output, y tf.Output, optional ...BatchMatMulAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "BatchMatMul",
		Input: []tf.Input{
			x, y,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
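
// An editorial sketch (imports as in the first example; values made up):
// one batch of 2x2 matrices, multiplied plainly and with adj_y set.
//
//	s := op.NewScope()
//	x := op.Const(s, [][][]float32{{{1, 2}, {3, 4}}}) // shape [1 2 2]
//	y := op.Const(s, [][][]float32{{{5, 6}, {7, 8}}}) // shape [1 2 2]
//	z := op.BatchMatMul(s, x, y) // shape [1 2 2]
//	zAdj := op.BatchMatMul(s, x, y, op.BatchMatMulAdjY(true)) // x times y's adjoint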

// ParseSequenceExampleAttr is an optional argument to ParseSequenceExample.
type ParseSequenceExampleAttr func(optionalAttr)

// ParseSequenceExampleNcontextSparse sets the optional Ncontext_sparse attribute to value.
// If not specified, defaults to 0
//
// REQUIRES: value >= 0
func ParseSequenceExampleNcontextSparse(value int64) ParseSequenceExampleAttr {
	return func(m optionalAttr) {
		m["Ncontext_sparse"] = value
	}
}

// ParseSequenceExampleNcontextDense sets the optional Ncontext_dense attribute to value.
// If not specified, defaults to 0
//
// REQUIRES: value >= 0
func ParseSequenceExampleNcontextDense(value int64) ParseSequenceExampleAttr {
	return func(m optionalAttr) {
		m["Ncontext_dense"] = value
	}
}

// ParseSequenceExampleNfeatureListSparse sets the optional Nfeature_list_sparse attribute to value.
// If not specified, defaults to 0
//
// REQUIRES: value >= 0
func ParseSequenceExampleNfeatureListSparse(value int64) ParseSequenceExampleAttr {
	return func(m optionalAttr) {
		m["Nfeature_list_sparse"] = value
	}
}

// ParseSequenceExampleNfeatureListDense sets the optional Nfeature_list_dense attribute to value.
// If not specified, defaults to 0
//
// REQUIRES: value >= 0
func ParseSequenceExampleNfeatureListDense(value int64) ParseSequenceExampleAttr {
	return func(m optionalAttr) {
		m["Nfeature_list_dense"] = value
	}
}

// ParseSequenceExampleContextSparseTypes sets the optional context_sparse_types attribute to value.
//
// value: A list of Ncontext_sparse types; the data types of data in
// each context Feature given in context_sparse_keys.
// Currently the ParseSingleSequenceExample supports DT_FLOAT (FloatList),
// DT_INT64 (Int64List), and DT_STRING (BytesList).
// If not specified, defaults to <>
//
// REQUIRES: len(value) >= 0
func ParseSequenceExampleContextSparseTypes(value []tf.DataType) ParseSequenceExampleAttr {
	return func(m optionalAttr) {
		m["context_sparse_types"] = value
	}
}

// ParseSequenceExampleFeatureListDenseTypes sets the optional feature_list_dense_types attribute to value.
// If not specified, defaults to <>
//
// REQUIRES: len(value) >= 0
func ParseSequenceExampleFeatureListDenseTypes(value []tf.DataType) ParseSequenceExampleAttr {
	return func(m optionalAttr) {
		m["feature_list_dense_types"] = value
	}
}

// ParseSequenceExampleContextDenseShapes sets the optional context_dense_shapes attribute to value.
//
// value: A list of Ncontext_dense shapes; the shapes of data in
// each context Feature given in context_dense_keys.
// The number of elements in the Feature corresponding to context_dense_key[j]
// must always equal context_dense_shapes[j].NumEntries().
// The shape of context_dense_values[j] will match context_dense_shapes[j].
// If not specified, defaults to <>
//
// REQUIRES: len(value) >= 0
func ParseSequenceExampleContextDenseShapes(value []tf.Shape) ParseSequenceExampleAttr {
	return func(m optionalAttr) {
		m["context_dense_shapes"] = value
	}
}

// ParseSequenceExampleFeatureListSparseTypes sets the optional feature_list_sparse_types attribute to value.
//
// value: A list of Nfeature_list_sparse types; the data types
// of data in each FeatureList given in feature_list_sparse_keys.
// Currently the ParseSingleSequenceExample supports DT_FLOAT (FloatList),
// DT_INT64 (Int64List), and DT_STRING (BytesList).
// If not specified, defaults to <>
//
// REQUIRES: len(value) >= 0
func ParseSequenceExampleFeatureListSparseTypes(value []tf.DataType) ParseSequenceExampleAttr {
	return func(m optionalAttr) {
		m["feature_list_sparse_types"] = value
	}
}

// ParseSequenceExampleFeatureListDenseShapes sets the optional feature_list_dense_shapes attribute to value.
//
// value: A list of Nfeature_list_dense shapes; the shapes of
// data in each FeatureList given in feature_list_dense_keys.
// The shape of each Feature in the FeatureList corresponding to
// feature_list_dense_key[j] must always equal
// feature_list_dense_shapes[j].NumEntries().
// If not specified, defaults to <>
//
// REQUIRES: len(value) >= 0
func ParseSequenceExampleFeatureListDenseShapes(value []tf.Shape) ParseSequenceExampleAttr {
	return func(m optionalAttr) {
		m["feature_list_dense_shapes"] = value
	}
}

// Transforms a vector of brain.SequenceExample protos (as strings) into typed tensors.
//
// Arguments:
//	serialized: A vector containing binary serialized SequenceExample protos.
//	debug_name: A vector containing the names of the serialized protos.
// May contain, for example, a table key (descriptive) name for the
// corresponding serialized proto. This is purely for debugging; the presence
// of values here has no effect on the output. May also be an empty vector if
// no name is available.
//	context_dense_defaults: A list of Ncontext_dense Tensors (some may be empty).
// context_dense_defaults[j] provides default values
// when the SequenceExample's context map lacks context_dense_key[j].
// If an empty Tensor is provided for context_dense_defaults[j],
// then the Feature context_dense_keys[j] is required.
// The input type is inferred from context_dense_defaults[j], even when it's
// empty.  If context_dense_defaults[j] is not empty, its shape must match
// context_dense_shapes[j].
//	feature_list_dense_missing_assumed_empty: A vector listing the
// FeatureList keys which may be missing from the SequenceExamples.  If the
// associated FeatureList is missing, it is treated as empty.  By default,
// any FeatureList not listed in this vector must exist in the SequenceExamples.
//	context_sparse_keys: A list of Ncontext_sparse string Tensors (scalars).
// The keys expected in the Examples' features associated with context_sparse
// values.
//	context_dense_keys: A list of Ncontext_dense string Tensors (scalars).
// The keys expected in the SequenceExamples' context features associated with
// dense values.
//	feature_list_sparse_keys: A list of Nfeature_list_sparse string Tensors
// (scalars).  The keys expected in the FeatureLists associated with sparse
// values.
//	feature_list_dense_keys: A list of Nfeature_list_dense string Tensors (scalars).
// The keys expected in the SequenceExamples' feature_lists associated
// with lists of dense values.
func ParseSequenceExample(scope *Scope, serialized tf.Output, debug_name tf.Output, context_dense_defaults []tf.Output, feature_list_dense_missing_assumed_empty []string, context_sparse_keys []string, context_dense_keys []string, feature_list_sparse_keys []string, feature_list_dense_keys []string, optional ...ParseSequenceExampleAttr) (context_sparse_indices []tf.Output, context_sparse_values []tf.Output, context_sparse_shapes []tf.Output, context_dense_values []tf.Output, feature_list_sparse_indices []tf.Output, feature_list_sparse_values []tf.Output, feature_list_sparse_shapes []tf.Output, feature_list_dense_values []tf.Output, feature_list_dense_lengths []tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"feature_list_dense_missing_assumed_empty": feature_list_dense_missing_assumed_empty, "context_sparse_keys": context_sparse_keys, "context_dense_keys": context_dense_keys, "feature_list_sparse_keys": feature_list_sparse_keys, "feature_list_dense_keys": feature_list_dense_keys}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ParseSequenceExample",
		Input: []tf.Input{
			serialized, debug_name, tf.OutputList(context_dense_defaults),
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	if scope.Err() != nil {
		return
	}
	var idx int
	var err error
	if context_sparse_indices, idx, err = makeOutputList(op, idx, "context_sparse_indices"); err != nil {
		scope.UpdateErr("ParseSequenceExample", err)
		return
	}
	if context_sparse_values, idx, err = makeOutputList(op, idx, "context_sparse_values"); err != nil {
		scope.UpdateErr("ParseSequenceExample", err)
		return
	}
	if context_sparse_shapes, idx, err = makeOutputList(op, idx, "context_sparse_shapes"); err != nil {
		scope.UpdateErr("ParseSequenceExample", err)
		return
	}
	if context_dense_values, idx, err = makeOutputList(op, idx, "context_dense_values"); err != nil {
		scope.UpdateErr("ParseSequenceExample", err)
		return
	}
	if feature_list_sparse_indices, idx, err = makeOutputList(op, idx, "feature_list_sparse_indices"); err != nil {
		scope.UpdateErr("ParseSequenceExample", err)
		return
	}
	if feature_list_sparse_values, idx, err = makeOutputList(op, idx, "feature_list_sparse_values"); err != nil {
		scope.UpdateErr("ParseSequenceExample", err)
		return
	}
	if feature_list_sparse_shapes, idx, err = makeOutputList(op, idx, "feature_list_sparse_shapes"); err != nil {
		scope.UpdateErr("ParseSequenceExample", err)
		return
	}
	if feature_list_dense_values, idx, err = makeOutputList(op, idx, "feature_list_dense_values"); err != nil {
		scope.UpdateErr("ParseSequenceExample", err)
		return
	}
	if feature_list_dense_lengths, idx, err = makeOutputList(op, idx, "feature_list_dense_lengths"); err != nil {
		scope.UpdateErr("ParseSequenceExample", err)
		return
	}
	return context_sparse_indices, context_sparse_values, context_sparse_shapes, context_dense_values, feature_list_sparse_indices, feature_list_sparse_values, feature_list_sparse_shapes, feature_list_dense_values, feature_list_dense_lengths
}

// QuantizedReluAttr is an optional argument to QuantizedRelu.
type QuantizedReluAttr func(optionalAttr)

// QuantizedReluOutType sets the optional out_type attribute to value.
// If not specified, defaults to DT_QUINT8
func QuantizedReluOutType(value tf.DataType) QuantizedReluAttr {
	return func(m optionalAttr) {
		m["out_type"] = value
	}
}

// Computes Quantized Rectified Linear: `max(features, 0)`
//
// Arguments:
//
//	min_features: The float value that the lowest quantized value represents.
//	max_features: The float value that the highest quantized value represents.
//
// Returns activations with the same output shape as "features", the float
// value that the lowest quantized value represents, and the float value that
// the highest quantized value represents.
func QuantizedRelu(scope *Scope, features tf.Output, min_features tf.Output, max_features tf.Output, optional ...QuantizedReluAttr) (activations tf.Output, min_activations tf.Output, max_activations tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "QuantizedRelu",
		Input: []tf.Input{
			features, min_features, max_features,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}

// Reorders a SparseTensor into the canonical, row-major ordering.
//
// Note that by convention, all sparse ops preserve the canonical ordering along
// increasing dimension number. The only time ordering can be violated is during
// manual manipulation of the indices and values vectors to add entries.
//
// Reordering does not affect the shape of the SparseTensor.
//
// If the tensor has rank `R` and `N` non-empty values, `input_indices` has
// shape `[N, R]`, input_values has length `N`, and input_shape has length `R`.
//
// Arguments:
//	input_indices: 2-D.  `N x R` matrix with the indices of non-empty values in a
// SparseTensor, possibly not in canonical ordering.
//	input_values: 1-D.  `N` non-empty values corresponding to `input_indices`.
//	input_shape: 1-D.  Shape of the input SparseTensor.
//
// Returns a 2-D `N x R` matrix with the same indices as input_indices, but in
// canonical row-major ordering, and the 1-D list of `N` non-empty values
// corresponding to `output_indices`.
func SparseReorder(scope *Scope, input_indices tf.Output, input_values tf.Output, input_shape tf.Output) (output_indices tf.Output, output_values tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "SparseReorder",
		Input: []tf.Input{
			input_indices, input_values, input_shape,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1)
}
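
// An editorial sketch (imports as in the first example; entries made up): a
// 3x4 SparseTensor whose two entries are given out of row-major order.
//
//	s := op.NewScope()
//	indices := op.Const(s, [][]int64{{2, 1}, {0, 3}})
//	values := op.Const(s, []float32{7, 8})
//	shape := op.Const(s, []int64{3, 4})
//	outIdx, outVals := op.SparseReorder(s, indices, values, shape)
//	// After running: outIdx == [[0 3] [2 1]] and outVals == [8 7].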

// PrelinearizeTupleAttr is an optional argument to PrelinearizeTuple.
type PrelinearizeTupleAttr func(optionalAttr)

// PrelinearizeTupleLayouts sets the optional layouts attribute to value.
//
// value: A vector holding the requested layout in minor-to-major sequence for all the
// tuple shapes in the order the shapes appear in the "shapes" input. The layout
// elements for a sub-shape can be set to -1 in which case the corresponding layout
// will be computed by the infeed operation.
// If not specified, defaults to <>
func PrelinearizeTupleLayouts(value []int64) PrelinearizeTupleAttr {
	return func(m optionalAttr) {
		m["layouts"] = value
	}
}

// An op which linearizes multiple Tensor values to an opaque variant tensor.
//
// Arguments:
//	inputs: A list of tensors that will be provided using the infeed mechanism.
//	shapes: The shapes of each tensor in `inputs`.
func PrelinearizeTuple(scope *Scope, inputs []tf.Output, shapes []tf.Shape, optional ...PrelinearizeTupleAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"shapes": shapes}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "PrelinearizeTuple",
		Input: []tf.Input{
			tf.OutputList(inputs),
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// ComplexAbsAttr is an optional argument to ComplexAbs.
type ComplexAbsAttr func(optionalAttr)

// ComplexAbsTout sets the optional Tout attribute to value.
// If not specified, defaults to DT_FLOAT
func ComplexAbsTout(value tf.DataType) ComplexAbsAttr {
	return func(m optionalAttr) {
		m["Tout"] = value
	}
}

// Computes the complex absolute value of a tensor.
//
// Given a tensor `x` of complex numbers, this operation returns a tensor of type
// `float` or `double` that is the absolute value of each element in `x`. All
// elements in `x` must be complex numbers of the form \\(a + bj\\). The absolute
// value is computed as \\( \sqrt{a^2 + b^2}\\).
func ComplexAbs(scope *Scope, x tf.Output, optional ...ComplexAbsAttr) (y tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ComplexAbs",
		Input: []tf.Input{
			x,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
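
// An editorial sketch (imports as in the first example; values made up):
// magnitudes of two complex64 values, using the default Tout of DT_FLOAT.
//
//	s := op.NewScope()
//	x := op.Const(s, []complex64{3 + 4i, 5 + 12i})
//	mag := op.ComplexAbs(s, x) // after running: [5 13], dtype float32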

// VariableShapeAttr is an optional argument to VariableShape.
type VariableShapeAttr func(optionalAttr)

// VariableShapeOutType sets the optional out_type attribute to value.
// If not specified, defaults to DT_INT32
func VariableShapeOutType(value tf.DataType) VariableShapeAttr {
	return func(m optionalAttr) {
		m["out_type"] = value
	}
}

// Returns the shape of the variable pointed to by `resource`.
//
// This operation returns a 1-D integer tensor representing the shape of `input`.
//
// For example:
//
// ```
// # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
// shape(t) ==> [2, 2, 3]
// ```
func VariableShape(scope *Scope, input tf.Output, optional ...VariableShapeAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "VariableShape",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// CompilationResultProto indicating the status of the TPU compilation.
func TPUCompilationResult(scope *Scope) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "TPUCompilationResult",
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Retrieves the tree ensemble resource stamp token, number of trees and growing statistics.
//
// Arguments:
//	tree_ensemble_handle: Handle to the tree ensemble.
//
// Returns the stamp token of the tree ensemble resource, the number of trees
// in the tree ensemble resource, the number of trees that were finished
// successfully, the number of layers we attempted to build (but not
// necessarily succeeded at), and a rank-1, size-2 tensor containing the start
// and end ids of the nodes in the latest layer.
func BoostedTreesGetEnsembleStates(scope *Scope, tree_ensemble_handle tf.Output) (stamp_token tf.Output, num_trees tf.Output, num_finalized_trees tf.Output, num_attempted_layers tf.Output, last_layer_nodes_range tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "BoostedTreesGetEnsembleStates",
		Input: []tf.Input{
			tree_ensemble_handle,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2), op.Output(3), op.Output(4)
}

// Store the input tensor in the state of the current session.
//
// Arguments:
//	value: The tensor to be stored.
//
// Returns The handle for the tensor stored in the session state, represented
// as a ResourceHandle object.
func GetSessionHandleV2(scope *Scope, value tf.Output) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "GetSessionHandleV2",
		Input: []tf.Input{
			value,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// ResourceApplyAdamAttr is an optional argument to ResourceApplyAdam.
type ResourceApplyAdamAttr func(optionalAttr)

// ResourceApplyAdamUseLocking sets the optional use_locking attribute to value.
//
// value: If `True`, updating of the var, m, and v tensors will be protected
// by a lock; otherwise the behavior is undefined, but may exhibit less
// contention.
// If not specified, defaults to false
func ResourceApplyAdamUseLocking(value bool) ResourceApplyAdamAttr {
	return func(m optionalAttr) {
		m["use_locking"] = value
	}
}

// ResourceApplyAdamUseNesterov sets the optional use_nesterov attribute to value.
//
// value: If `True`, uses the nesterov update.
// If not specified, defaults to false
func ResourceApplyAdamUseNesterov(value bool) ResourceApplyAdamAttr {
	return func(m optionalAttr) {
		m["use_nesterov"] = value
	}
}

// Update '*var' according to the Adam algorithm.
//
// $$\text{lr}_t := \text{learning\_rate} * \sqrt{1 - \beta_2^t} / (1 - \beta_1^t)$$
// $$m_t := \beta_1 * m_{t-1} + (1 - \beta_1) * g$$
// $$v_t := \beta_2 * v_{t-1} + (1 - \beta_2) * g * g$$
// $$\text{variable} := \text{variable} - \text{lr}_t * m_t / (\sqrt{v_t} + \epsilon)$$
//
// Arguments:
//	var_: Should be from a Variable().
//	m: Should be from a Variable().
//	v: Should be from a Variable().
//	beta1_power: Must be a scalar.
//	beta2_power: Must be a scalar.
//	lr: Scaling factor. Must be a scalar.
//	beta1: Momentum factor. Must be a scalar.
//	beta2: Momentum factor. Must be a scalar.
//	epsilon: Ridge term. Must be a scalar.
//	grad: The gradient.
//
// Returns the created operation.
func ResourceApplyAdam(scope *Scope, var_ tf.Output, m tf.Output, v tf.Output, beta1_power tf.Output, beta2_power tf.Output, lr tf.Output, beta1 tf.Output, beta2 tf.Output, epsilon tf.Output, grad tf.Output, optional ...ResourceApplyAdamAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ResourceApplyAdam",
		Input: []tf.Input{
			var_, m, v, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}
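
// To make the formulas above concrete, here is a plain-Go transcription of a
// few Adam steps on a single scalar (an editorial sketch with made-up
// hyperparameters, not a call into TensorFlow). The beta1_power and
// beta2_power inputs of the op correspond to the accumulated powers
// math.Pow(beta, t) below.
//
//	package main
//
//	import (
//		"fmt"
//		"math"
//	)
//
//	func main() {
//		variable, m, v := 1.0, 0.0, 0.0
//		lr, beta1, beta2, epsilon := 0.001, 0.9, 0.999, 1e-8
//		g := 0.5 // gradient
//		for t := 1; t <= 3; t++ {
//			m = beta1*m + (1-beta1)*g
//			v = beta2*v + (1-beta2)*g*g
//			lrT := lr * math.Sqrt(1-math.Pow(beta2, float64(t))) / (1 - math.Pow(beta1, float64(t)))
//			variable -= lrT * m / (math.Sqrt(v) + epsilon)
//		}
//		fmt.Println(variable, m, v)
//	}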
16211
16212// SdcaOptimizerAttr is an optional argument to SdcaOptimizer.
16213type SdcaOptimizerAttr func(optionalAttr)
16214
16215// SdcaOptimizerAdaptative sets the optional adaptative attribute to value.
16216//
16217// value: Whether to use Adaptive SDCA for the inner loop.
16218// If not specified, defaults to true
16219func SdcaOptimizerAdaptative(value bool) SdcaOptimizerAttr {
16220	return func(m optionalAttr) {
16221		m["adaptative"] = value
16222	}
16223}
16224
16225// Distributed version of Stochastic Dual Coordinate Ascent (SDCA) optimizer for
16226//
// linear models with L1 + L2 regularization. As the global optimization objective
// is strongly convex, the optimizer optimizes the dual objective at each step. The
// optimizer applies each update one example at a time. Examples are sampled
// uniformly, and the optimizer is learning-rate free and enjoys a linear
// convergence rate.
16232//
16233// [Proximal Stochastic Dual Coordinate Ascent](http://arxiv.org/pdf/1211.2717v1.pdf).<br>
16234// Shai Shalev-Shwartz, Tong Zhang. 2012
16235//
// $$\text{Loss Objective} = \sum_{i} f_{i}(w x_{i}) + \frac{l_2}{2} \|w\|^2 + l_1 \|w\|$$
16237//
16238// [Adding vs. Averaging in Distributed Primal-Dual Optimization](http://arxiv.org/abs/1502.03508).<br>
16239// Chenxin Ma, Virginia Smith, Martin Jaggi, Michael I. Jordan,
16240// Peter Richtarik, Martin Takac. 2015
16241//
16242// [Stochastic Dual Coordinate Ascent with Adaptive Probabilities](https://arxiv.org/abs/1502.08053).<br>
16243// Dominik Csiba, Zheng Qu, Peter Richtarik. 2015
16244//
16245// Arguments:
16246//	sparse_example_indices: a list of vectors which contain example indices.
16247//	sparse_feature_indices: a list of vectors which contain feature indices.
//	sparse_feature_values: a list of vectors which contain the feature values
// associated with each feature group.
16250//	dense_features: a list of matrices which contains the dense feature values.
16251//	example_weights: a vector which contains the weight associated with each
16252// example.
16253//	example_labels: a vector which contains the label/target associated with each
16254// example.
//	sparse_indices: a list of vectors where each value is the index which has a
// corresponding weight in sparse_weights. This field may be omitted for the
// dense approach.
16258//	sparse_weights: a list of vectors where each value is the weight associated with
16259// a sparse feature group.
16260//	dense_weights: a list of vectors where the values are the weights associated
16261// with a dense feature group.
16262//	example_state_data: a list of vectors containing the example state data.
16263//	loss_type: Type of the primal loss. Currently SdcaSolver supports logistic,
16264// squared and hinge losses.
16265//	l1: Symmetric l1 regularization strength.
16266//	l2: Symmetric l2 regularization strength.
16267//	num_loss_partitions: Number of partitions of the global loss function.
16268//	num_inner_iterations: Number of iterations per mini-batch.
16269//
// Returns:
//	out_example_state_data: a list of vectors containing the updated example
// state data.
//	out_delta_sparse_weights: a list of vectors where each value is the delta
// weights associated with a sparse feature group.
//	out_delta_dense_weights: a list of vectors where the values are the delta
// weights associated with a dense feature group.
16274func SdcaOptimizer(scope *Scope, sparse_example_indices []tf.Output, sparse_feature_indices []tf.Output, sparse_feature_values []tf.Output, dense_features []tf.Output, example_weights tf.Output, example_labels tf.Output, sparse_indices []tf.Output, sparse_weights []tf.Output, dense_weights []tf.Output, example_state_data tf.Output, loss_type string, l1 float32, l2 float32, num_loss_partitions int64, num_inner_iterations int64, optional ...SdcaOptimizerAttr) (out_example_state_data tf.Output, out_delta_sparse_weights []tf.Output, out_delta_dense_weights []tf.Output) {
16275	if scope.Err() != nil {
16276		return
16277	}
16278	attrs := map[string]interface{}{"loss_type": loss_type, "l1": l1, "l2": l2, "num_loss_partitions": num_loss_partitions, "num_inner_iterations": num_inner_iterations}
16279	for _, a := range optional {
16280		a(attrs)
16281	}
16282	opspec := tf.OpSpec{
16283		Type: "SdcaOptimizer",
16284		Input: []tf.Input{
16285			tf.OutputList(sparse_example_indices), tf.OutputList(sparse_feature_indices), tf.OutputList(sparse_feature_values), tf.OutputList(dense_features), example_weights, example_labels, tf.OutputList(sparse_indices), tf.OutputList(sparse_weights), tf.OutputList(dense_weights), example_state_data,
16286		},
16287		Attrs: attrs,
16288	}
16289	op := scope.AddOperation(opspec)
16290	if scope.Err() != nil {
16291		return
16292	}
16293	var idx int
16294	var err error
16295	out_example_state_data = op.Output(idx)
16296	if out_delta_sparse_weights, idx, err = makeOutputList(op, idx, "out_delta_sparse_weights"); err != nil {
16297		scope.UpdateErr("SdcaOptimizer", err)
16298		return
16299	}
16300	if out_delta_dense_weights, idx, err = makeOutputList(op, idx, "out_delta_dense_weights"); err != nil {
16301		scope.UpdateErr("SdcaOptimizer", err)
16302		return
16303	}
16304	return out_example_state_data, out_delta_sparse_weights, out_delta_dense_weights
16305}
16306
16307// ExperimentalParseExampleDatasetAttr is an optional argument to ExperimentalParseExampleDataset.
16308type ExperimentalParseExampleDatasetAttr func(optionalAttr)
16309
16310// ExperimentalParseExampleDatasetSloppy sets the optional sloppy attribute to value.
16311// If not specified, defaults to false
16312func ExperimentalParseExampleDatasetSloppy(value bool) ExperimentalParseExampleDatasetAttr {
16313	return func(m optionalAttr) {
16314		m["sloppy"] = value
16315	}
16316}
16317
16318// Transforms `input_dataset` containing `Example` protos as vectors of DT_STRING into a dataset of `Tensor` or `SparseTensor` objects representing the parsed features.
16319//
16320// Arguments:
16321//
16322//
16323//	dense_defaults: A dict mapping string keys to `Tensor`s.
16324// The keys of the dict must match the dense_keys of the feature.
16325//	sparse_keys: A list of string keys in the examples features.
16326// The results for these keys will be returned as `SparseTensor` objects.
16327//	dense_keys: A list of Ndense string Tensors (scalars).
16328// The keys expected in the Examples features associated with dense values.
16329//	sparse_types: A list of `DTypes` of the same length as `sparse_keys`.
16330// Only `tf.float32` (`FloatList`), `tf.int64` (`Int64List`),
16331// and `tf.string` (`BytesList`) are supported.
16332//	dense_shapes: List of tuples with the same length as `dense_keys`.
16333// The shape of the data for each dense feature referenced by `dense_keys`.
16334// Required for any input tensors identified by `dense_keys`.  Must be
16335// either fully defined, or may contain an unknown first dimension.
16336// An unknown first dimension means the feature is treated as having
16337// a variable number of blocks, and the output shape along this dimension
16338// is considered unknown at graph build time.  Padding is applied for
16339// minibatch elements smaller than the maximum number of blocks for the
16340// given feature along this dimension.
16341//	output_types: The type list for the return values.
16342//	output_shapes: The list of shapes being produced.
16343func ExperimentalParseExampleDataset(scope *Scope, input_dataset tf.Output, num_parallel_calls tf.Output, dense_defaults []tf.Output, sparse_keys []string, dense_keys []string, sparse_types []tf.DataType, dense_shapes []tf.Shape, output_types []tf.DataType, output_shapes []tf.Shape, optional ...ExperimentalParseExampleDatasetAttr) (handle tf.Output) {
16344	if scope.Err() != nil {
16345		return
16346	}
16347	attrs := map[string]interface{}{"sparse_keys": sparse_keys, "dense_keys": dense_keys, "sparse_types": sparse_types, "dense_shapes": dense_shapes, "output_types": output_types, "output_shapes": output_shapes}
16348	for _, a := range optional {
16349		a(attrs)
16350	}
16351	opspec := tf.OpSpec{
16352		Type: "ExperimentalParseExampleDataset",
16353		Input: []tf.Input{
16354			input_dataset, num_parallel_calls, tf.OutputList(dense_defaults),
16355		},
16356		Attrs: attrs,
16357	}
16358	op := scope.AddOperation(opspec)
16359	return op.Output(0)
16360}
16361
16362// PrelinearizeAttr is an optional argument to Prelinearize.
16363type PrelinearizeAttr func(optionalAttr)
16364
16365// PrelinearizeShape sets the optional shape attribute to value.
16366//
16367// value: The shape of the tensor.
16368// If not specified, defaults to <>
16369func PrelinearizeShape(value tf.Shape) PrelinearizeAttr {
16370	return func(m optionalAttr) {
16371		m["shape"] = value
16372	}
16373}
16374
16375// PrelinearizeLayout sets the optional layout attribute to value.
16376//
16377// value: A vector holding the requested layout in minor-to-major sequence. If a layout
16378// attribute is passed but its values are all -1 the layout will be computed by
16379// the infeed operation.
16380// If not specified, defaults to <>
16381func PrelinearizeLayout(value []int64) PrelinearizeAttr {
16382	return func(m optionalAttr) {
16383		m["layout"] = value
16384	}
16385}
16386
16387// An op which linearizes one Tensor value to an opaque variant tensor.
16388//
16389// Arguments:
16390//	input: A tensor that will be linearized.
16391func Prelinearize(scope *Scope, input tf.Output, optional ...PrelinearizeAttr) (output tf.Output) {
16392	if scope.Err() != nil {
16393		return
16394	}
16395	attrs := map[string]interface{}{}
16396	for _, a := range optional {
16397		a(attrs)
16398	}
16399	opspec := tf.OpSpec{
16400		Type: "Prelinearize",
16401		Input: []tf.Input{
16402			input,
16403		},
16404		Attrs: attrs,
16405	}
16406	op := scope.AddOperation(opspec)
16407	return op.Output(0)
16408}
16409
16410// QuantizedMatMulAttr is an optional argument to QuantizedMatMul.
16411type QuantizedMatMulAttr func(optionalAttr)
16412
16413// QuantizedMatMulToutput sets the optional Toutput attribute to value.
16414// If not specified, defaults to DT_QINT32
16415func QuantizedMatMulToutput(value tf.DataType) QuantizedMatMulAttr {
16416	return func(m optionalAttr) {
16417		m["Toutput"] = value
16418	}
16419}
16420
16421// QuantizedMatMulTransposeA sets the optional transpose_a attribute to value.
16422//
16423// value: If true, `a` is transposed before multiplication.
16424// If not specified, defaults to false
16425func QuantizedMatMulTransposeA(value bool) QuantizedMatMulAttr {
16426	return func(m optionalAttr) {
16427		m["transpose_a"] = value
16428	}
16429}
16430
16431// QuantizedMatMulTransposeB sets the optional transpose_b attribute to value.
16432//
16433// value: If true, `b` is transposed before multiplication.
16434// If not specified, defaults to false
16435func QuantizedMatMulTransposeB(value bool) QuantizedMatMulAttr {
16436	return func(m optionalAttr) {
16437		m["transpose_b"] = value
16438	}
16439}
16440
16441// QuantizedMatMulTactivation sets the optional Tactivation attribute to value.
16442//
16443// value: The type of output produced by activation function
16444// following this operation.
16445// If not specified, defaults to DT_QUINT8
16446func QuantizedMatMulTactivation(value tf.DataType) QuantizedMatMulAttr {
16447	return func(m optionalAttr) {
16448		m["Tactivation"] = value
16449	}
16450}
16451
// Perform a quantized matrix multiplication of `a` by the matrix `b`.
//
// The inputs must be two-dimensional matrices and the inner dimension of
// `a` (after being transposed if `transpose_a` is non-zero) must match the
// outer dimension of `b` (after being transposed if `transpose_b` is
// non-zero).
16458//
16459// Arguments:
16460//	a: Must be a two-dimensional tensor.
16461//	b: Must be a two-dimensional tensor.
16462//	min_a: The float value that the lowest quantized `a` value represents.
16463//	max_a: The float value that the highest quantized `a` value represents.
16464//	min_b: The float value that the lowest quantized `b` value represents.
16465//	max_b: The float value that the highest quantized `b` value represents.
16466//
// Returns:
//	out: The quantized output.
//	min_out: The float value that the lowest quantized output value represents.
//	max_out: The float value that the highest quantized output value represents.
16468func QuantizedMatMul(scope *Scope, a tf.Output, b tf.Output, min_a tf.Output, max_a tf.Output, min_b tf.Output, max_b tf.Output, optional ...QuantizedMatMulAttr) (out tf.Output, min_out tf.Output, max_out tf.Output) {
16469	if scope.Err() != nil {
16470		return
16471	}
16472	attrs := map[string]interface{}{}
16473	for _, a := range optional {
16474		a(attrs)
16475	}
16476	opspec := tf.OpSpec{
16477		Type: "QuantizedMatMul",
16478		Input: []tf.Input{
16479			a, b, min_a, max_a, min_b, max_b,
16480		},
16481		Attrs: attrs,
16482	}
16483	op := scope.AddOperation(opspec)
16484	return op.Output(0), op.Output(1), op.Output(2)
16485}
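
// Example: a wiring sketch. In practice the quantized inputs and their
// min/max ranges come from a quantization op such as QuantizeV2 (assumed
// here); the placeholders and ranges below are illustrative only.
//
// ```go
// s := NewScope()
// a := Placeholder(s.SubScope("a"), tf.Quint8) // [m, k] quantized matrix
// b := Placeholder(s.SubScope("b"), tf.Quint8) // [k, n] quantized matrix
// lo, hi := Const(s, float32(-1)), Const(s, float32(1))
// out, minOut, maxOut := QuantizedMatMul(s, a, b, lo, hi, lo, hi,
// 	QuantizedMatMulToutput(tf.Qint32))
// _, _, _ = out, minOut, maxOut
// ```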
16486
16487// Inverse 2D real-valued fast Fourier transform.
16488//
16489// Computes the inverse 2-dimensional discrete Fourier transform of a real-valued
16490// signal over the inner-most 2 dimensions of `input`.
16491//
16492// The inner-most 2 dimensions of `input` are assumed to be the result of `RFFT2D`:
16493// The inner-most dimension contains the `fft_length / 2 + 1` unique components of
16494// the DFT of a real-valued signal. If `fft_length` is not provided, it is computed
16495// from the size of the inner-most 2 dimensions of `input`. If the FFT length used
16496// to compute `input` is odd, it should be provided since it cannot be inferred
16497// properly.
16498//
16499// Along each axis `IRFFT2D` is computed on, if `fft_length` (or
16500// `fft_length / 2 + 1` for the inner-most dimension) is smaller than the
16501// corresponding dimension of `input`, the dimension is cropped. If it is larger,
16502// the dimension is padded with zeros.
16503//
16504// Arguments:
16505//	input: A complex64 tensor.
16506//	fft_length: An int32 tensor of shape [2]. The FFT length for each dimension.
16507//
16508// Returns A float32 tensor of the same rank as `input`. The inner-most 2
16509//   dimensions of `input` are replaced with the `fft_length` samples of their
16510//   inverse 2D Fourier transform.
16511//
16512// @compatibility(numpy)
16513// Equivalent to np.fft.irfft2
16514// @end_compatibility
16515func IRFFT2D(scope *Scope, input tf.Output, fft_length tf.Output) (output tf.Output) {
16516	if scope.Err() != nil {
16517		return
16518	}
16519	opspec := tf.OpSpec{
16520		Type: "IRFFT2D",
16521		Input: []tf.Input{
16522			input, fft_length,
16523		},
16524	}
16525	op := scope.AddOperation(opspec)
16526	return op.Output(0)
16527}
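
// Example: a sketch of the inverse transform described above. The spectrum
// would normally be produced by RFFT2D; the shapes are illustrative.
//
// ```go
// s := NewScope()
// spectrum := Placeholder(s, tf.Complex64) // e.g. shape [..., 8, 5]
// fftLen := Const(s, []int32{8, 8})        // original signal size per axis
// signal := IRFFT2D(s, spectrum, fftLen)   // float32, shape [..., 8, 8]
// _ = signal
// ```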
16528
16529// InfeedEnqueueTupleAttr is an optional argument to InfeedEnqueueTuple.
16530type InfeedEnqueueTupleAttr func(optionalAttr)
16531
16532// InfeedEnqueueTupleLayouts sets the optional layouts attribute to value.
16533//
16534// value: A vector holding the requested layout in minor-to-major sequence for
16535// all the tuple shapes, in the order the shapes appear in the "shapes" input.
16536// The layout elements for a sub-shape can be set to -1, in which case the
16537// corresponding layout will be computed by the infeed operation.
16538// If not specified, defaults to <>
16539func InfeedEnqueueTupleLayouts(value []int64) InfeedEnqueueTupleAttr {
16540	return func(m optionalAttr) {
16541		m["layouts"] = value
16542	}
16543}
16544
16545// InfeedEnqueueTupleDeviceOrdinal sets the optional device_ordinal attribute to value.
16546//
16547// value: The TPU device to use. This should be -1 when the Op
16548// is running on a TPU device, and >= 0 when the Op is running on the CPU
16549// device.
16550// If not specified, defaults to -1
16551func InfeedEnqueueTupleDeviceOrdinal(value int64) InfeedEnqueueTupleAttr {
16552	return func(m optionalAttr) {
16553		m["device_ordinal"] = value
16554	}
16555}
16556
16557// Feeds multiple Tensor values into the computation as an XLA tuple.
16558//
16559// Arguments:
16560//	inputs: A list of tensors that will be provided using the infeed mechanism.
16561//	shapes: The shapes of each tensor in `inputs`.
16562//
16563// Returns the created operation.
16564func InfeedEnqueueTuple(scope *Scope, inputs []tf.Output, shapes []tf.Shape, optional ...InfeedEnqueueTupleAttr) (o *tf.Operation) {
16565	if scope.Err() != nil {
16566		return
16567	}
16568	attrs := map[string]interface{}{"shapes": shapes}
16569	for _, a := range optional {
16570		a(attrs)
16571	}
16572	opspec := tf.OpSpec{
16573		Type: "InfeedEnqueueTuple",
16574		Input: []tf.Input{
16575			tf.OutputList(inputs),
16576		},
16577		Attrs: attrs,
16578	}
16579	return scope.AddOperation(opspec)
16580}
16581
16582// Returns which elements of x are finite.
16583//
16584// @compatibility(numpy)
16585// Equivalent to np.isfinite
16586// @end_compatibility
16587func IsFinite(scope *Scope, x tf.Output) (y tf.Output) {
16588	if scope.Err() != nil {
16589		return
16590	}
16591	opspec := tf.OpSpec{
16592		Type: "IsFinite",
16593		Input: []tf.Input{
16594			x,
16595		},
16596	}
16597	op := scope.AddOperation(opspec)
16598	return op.Output(0)
16599}
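
// Example: a one-op sketch (the standard library "math" package is assumed
// for the non-finite test values).
//
// ```go
// s := NewScope()
// x := Const(s, []float32{1, float32(math.Inf(1)), float32(math.NaN())})
// finite := IsFinite(s, x) // evaluates to [true, false, false]
// _ = finite
// ```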
16600
16601// ResourceStridedSliceAssignAttr is an optional argument to ResourceStridedSliceAssign.
16602type ResourceStridedSliceAssignAttr func(optionalAttr)
16603
16604// ResourceStridedSliceAssignBeginMask sets the optional begin_mask attribute to value.
16605// If not specified, defaults to 0
16606func ResourceStridedSliceAssignBeginMask(value int64) ResourceStridedSliceAssignAttr {
16607	return func(m optionalAttr) {
16608		m["begin_mask"] = value
16609	}
16610}
16611
16612// ResourceStridedSliceAssignEndMask sets the optional end_mask attribute to value.
16613// If not specified, defaults to 0
16614func ResourceStridedSliceAssignEndMask(value int64) ResourceStridedSliceAssignAttr {
16615	return func(m optionalAttr) {
16616		m["end_mask"] = value
16617	}
16618}
16619
16620// ResourceStridedSliceAssignEllipsisMask sets the optional ellipsis_mask attribute to value.
16621// If not specified, defaults to 0
16622func ResourceStridedSliceAssignEllipsisMask(value int64) ResourceStridedSliceAssignAttr {
16623	return func(m optionalAttr) {
16624		m["ellipsis_mask"] = value
16625	}
16626}
16627
16628// ResourceStridedSliceAssignNewAxisMask sets the optional new_axis_mask attribute to value.
16629// If not specified, defaults to 0
16630func ResourceStridedSliceAssignNewAxisMask(value int64) ResourceStridedSliceAssignAttr {
16631	return func(m optionalAttr) {
16632		m["new_axis_mask"] = value
16633	}
16634}
16635
16636// ResourceStridedSliceAssignShrinkAxisMask sets the optional shrink_axis_mask attribute to value.
16637// If not specified, defaults to 0
16638func ResourceStridedSliceAssignShrinkAxisMask(value int64) ResourceStridedSliceAssignAttr {
16639	return func(m optionalAttr) {
16640		m["shrink_axis_mask"] = value
16641	}
16642}
16643
16644// Assign `value` to the sliced l-value reference of `ref`.
16645//
16646// The values of `value` are assigned to the positions in the variable
16647// `ref` that are selected by the slice parameters. The slice parameters
// `begin`, `end`, `strides`, etc. work exactly as in `StridedSlice`.
16649//
16650// NOTE this op currently does not support broadcasting and so `value`'s
16651// shape must be exactly the shape produced by the slice of `ref`.
16652//
16653// Returns the created operation.
16654func ResourceStridedSliceAssign(scope *Scope, ref tf.Output, begin tf.Output, end tf.Output, strides tf.Output, value tf.Output, optional ...ResourceStridedSliceAssignAttr) (o *tf.Operation) {
16655	if scope.Err() != nil {
16656		return
16657	}
16658	attrs := map[string]interface{}{}
16659	for _, a := range optional {
16660		a(attrs)
16661	}
16662	opspec := tf.OpSpec{
16663		Type: "ResourceStridedSliceAssign",
16664		Input: []tf.Input{
16665			ref, begin, end, strides, value,
16666		},
16667		Attrs: attrs,
16668	}
16669	return scope.AddOperation(opspec)
16670}
16671
16672// ArgMaxAttr is an optional argument to ArgMax.
16673type ArgMaxAttr func(optionalAttr)
16674
16675// ArgMaxOutputType sets the optional output_type attribute to value.
16676// If not specified, defaults to DT_INT64
16677func ArgMaxOutputType(value tf.DataType) ArgMaxAttr {
16678	return func(m optionalAttr) {
16679		m["output_type"] = value
16680	}
16681}
16682
16683// Returns the index with the largest value across dimensions of a tensor.
16684//
16685// Note that in case of ties the identity of the return value is not guaranteed.
16686//
16687// Arguments:
16688//
16689//	dimension: int32 or int64, must be in the range `[-rank(input), rank(input))`.
16690// Describes which dimension of the input Tensor to reduce across. For vectors,
16691// use dimension = 0.
16692func ArgMax(scope *Scope, input tf.Output, dimension tf.Output, optional ...ArgMaxAttr) (output tf.Output) {
16693	if scope.Err() != nil {
16694		return
16695	}
16696	attrs := map[string]interface{}{}
16697	for _, a := range optional {
16698		a(attrs)
16699	}
16700	opspec := tf.OpSpec{
16701		Type: "ArgMax",
16702		Input: []tf.Input{
16703			input, dimension,
16704		},
16705		Attrs: attrs,
16706	}
16707	op := scope.AddOperation(opspec)
16708	return op.Output(0)
16709}
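
// Example: a sketch taking the per-row argmax of a 2x2 matrix.
//
// ```go
// s := NewScope()
// logits := Const(s, [][]float32{{0.1, 0.9}, {0.8, 0.2}})
// axis := Const(s, int32(1))                                 // reduce columns
// idx := ArgMax(s, logits, axis, ArgMaxOutputType(tf.Int32)) // => [1, 0]
// _ = idx
// ```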
16710
16711// Extract `patches` from `images` and put them in the "depth" output dimension.
16712//
16713// Arguments:
16714//	images: 4-D Tensor with shape `[batch, in_rows, in_cols, depth]`.
16715//	ksizes: The size of the sliding window for each dimension of `images`.
16716//	strides: 1-D of length 4. How far the centers of two consecutive patches are in
16717// the images. Must be: `[1, stride_rows, stride_cols, 1]`.
16718//	rates: 1-D of length 4. Must be: `[1, rate_rows, rate_cols, 1]`. This is the
16719// input stride, specifying how far two consecutive patch samples are in the
16720// input. Equivalent to extracting patches with
16721// `patch_sizes_eff = patch_sizes + (patch_sizes - 1) * (rates - 1)`, followed by
16722// subsampling them spatially by a factor of `rates`. This is equivalent to
16723// `rate` in dilated (a.k.a. Atrous) convolutions.
16724//	padding: The type of padding algorithm to use.
16725//
16726// We specify the size-related attributes as:
16727//
16728// ```python
16729//       ksizes = [1, ksize_rows, ksize_cols, 1]
16730//       strides = [1, strides_rows, strides_cols, 1]
16731//       rates = [1, rates_rows, rates_cols, 1]
16732// ```
16733//
16734// Returns 4-D Tensor with shape `[batch, out_rows, out_cols, ksize_rows *
16735// ksize_cols * depth]` containing image patches with size
16736// `ksize_rows x ksize_cols x depth` vectorized in the "depth" dimension. Note
16737// `out_rows` and `out_cols` are the dimensions of the output patches.
16738func ExtractImagePatches(scope *Scope, images tf.Output, ksizes []int64, strides []int64, rates []int64, padding string) (patches tf.Output) {
16739	if scope.Err() != nil {
16740		return
16741	}
16742	attrs := map[string]interface{}{"ksizes": ksizes, "strides": strides, "rates": rates, "padding": padding}
16743	opspec := tf.OpSpec{
16744		Type: "ExtractImagePatches",
16745		Input: []tf.Input{
16746			images,
16747		},
16748		Attrs: attrs,
16749	}
16750	op := scope.AddOperation(opspec)
16751	return op.Output(0)
16752}
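
// Example: a sketch extracting dense 3x3 patches (stride 1, no dilation) with
// "SAME" padding; the input shape is illustrative.
//
// ```go
// s := NewScope()
// images := Placeholder(s, tf.Float) // [batch, rows, cols, depth]
// patches := ExtractImagePatches(s, images,
// 	[]int64{1, 3, 3, 1}, // ksizes
// 	[]int64{1, 1, 1, 1}, // strides
// 	[]int64{1, 1, 1, 1}, // rates
// 	"SAME")
// _ = patches // [batch, rows, cols, 3*3*depth]
// ```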
16753
16754// Computes the mean along sparse segments of a tensor.
16755//
16756// See `tf.sparse.segment_sum` for usage examples.
16757//
16758// Like `SegmentMean`, but `segment_ids` can have rank less than `data`'s first
16759// dimension, selecting a subset of dimension 0, specified by `indices`.
16760//
16761// Arguments:
16762//
16763//	indices: A 1-D tensor. Has same rank as `segment_ids`.
16764//	segment_ids: A 1-D tensor. Values should be sorted and can be repeated.
16765//
16766// Returns Has same shape as data, except for dimension 0 which
16767// has size `k`, the number of segments.
16768func SparseSegmentMean(scope *Scope, data tf.Output, indices tf.Output, segment_ids tf.Output) (output tf.Output) {
16769	if scope.Err() != nil {
16770		return
16771	}
16772	opspec := tf.OpSpec{
16773		Type: "SparseSegmentMean",
16774		Input: []tf.Input{
16775			data, indices, segment_ids,
16776		},
16777	}
16778	op := scope.AddOperation(opspec)
16779	return op.Output(0)
16780}
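
// Example: a sketch that averages rows 0 and 1 of `data` into segment 0 and
// selects row 3 for segment 1.
//
// ```go
// s := NewScope()
// data := Const(s, [][]float32{{1, 2}, {3, 4}, {5, 6}, {7, 8}})
// indices := Const(s, []int32{0, 1, 3})
// segIDs := Const(s, []int32{0, 0, 1})
// mean := SparseSegmentMean(s, data, indices, segIDs) // => [[2, 3], [7, 8]]
// _ = mean
// ```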
16781
16782// Deserializes a serialized tree ensemble config and replaces current tree
16783//
16784// ensemble.
16785//
16786// Arguments:
16787//	tree_ensemble_handle: Handle to the tree ensemble.
16788//	stamp_token: Token to use as the new value of the resource stamp.
16789//	tree_ensemble_serialized: Serialized proto of the ensemble.
16790//
16791// Returns the created operation.
16792func BoostedTreesDeserializeEnsemble(scope *Scope, tree_ensemble_handle tf.Output, stamp_token tf.Output, tree_ensemble_serialized tf.Output) (o *tf.Operation) {
16793	if scope.Err() != nil {
16794		return
16795	}
16796	opspec := tf.OpSpec{
16797		Type: "BoostedTreesDeserializeEnsemble",
16798		Input: []tf.Input{
16799			tree_ensemble_handle, stamp_token, tree_ensemble_serialized,
16800		},
16801	}
16802	return scope.AddOperation(opspec)
16803}
16804
16805// Transforms a tf.Example proto (as a string) into typed tensors.
16806//
16807// Arguments:
16808//	serialized: A vector containing a batch of binary serialized Example protos.
16809//	dense_defaults: A list of Tensors (some may be empty), whose length matches
16810// the length of `dense_keys`. dense_defaults[j] provides default values
16811// when the example's feature_map lacks dense_key[j].  If an empty Tensor is
16812// provided for dense_defaults[j], then the Feature dense_keys[j] is required.
16813// The input type is inferred from dense_defaults[j], even when it's empty.
16814// If dense_defaults[j] is not empty, and dense_shapes[j] is fully defined,
16815// then the shape of dense_defaults[j] must match that of dense_shapes[j].
16816// If dense_shapes[j] has an undefined major dimension (variable strides dense
16817// feature), dense_defaults[j] must contain a single element:
16818// the padding element.
16819//	num_sparse: The number of sparse features to be parsed from the example. This
16820// must match the lengths of `sparse_keys` and `sparse_types`.
16821//	sparse_keys: A list of `num_sparse` strings.
16822// The keys expected in the Examples' features associated with sparse values.
16823//	dense_keys: The keys expected in the Examples' features associated with dense
16824// values.
16825//	sparse_types: A list of `num_sparse` types; the data types of data in each
16826// Feature given in sparse_keys.
16827// Currently the ParseSingleExample op supports DT_FLOAT (FloatList),
16828// DT_INT64 (Int64List), and DT_STRING (BytesList).
16829//	dense_shapes: The shapes of data in each Feature given in dense_keys.
16830// The length of this list must match the length of `dense_keys`.  The
16831// number of elements in the Feature corresponding to dense_key[j] must
16832// always equal dense_shapes[j].NumEntries().  If dense_shapes[j] ==
16833// (D0, D1, ..., DN) then the shape of output Tensor dense_values[j]
16834// will be (D0, D1, ..., DN): In the case dense_shapes[j] = (-1, D1,
16835// ..., DN), the shape of the output Tensor dense_values[j] will be (M,
16836// D1, .., DN), where M is the number of blocks of elements of length
16837// D1 * .... * DN, in the input.
16838func ParseSingleExample(scope *Scope, serialized tf.Output, dense_defaults []tf.Output, num_sparse int64, sparse_keys []string, dense_keys []string, sparse_types []tf.DataType, dense_shapes []tf.Shape) (sparse_indices []tf.Output, sparse_values []tf.Output, sparse_shapes []tf.Output, dense_values []tf.Output) {
16839	if scope.Err() != nil {
16840		return
16841	}
16842	attrs := map[string]interface{}{"num_sparse": num_sparse, "sparse_keys": sparse_keys, "dense_keys": dense_keys, "sparse_types": sparse_types, "dense_shapes": dense_shapes}
16843	opspec := tf.OpSpec{
16844		Type: "ParseSingleExample",
16845		Input: []tf.Input{
16846			serialized, tf.OutputList(dense_defaults),
16847		},
16848		Attrs: attrs,
16849	}
16850	op := scope.AddOperation(opspec)
16851	if scope.Err() != nil {
16852		return
16853	}
16854	var idx int
16855	var err error
16856	if sparse_indices, idx, err = makeOutputList(op, idx, "sparse_indices"); err != nil {
16857		scope.UpdateErr("ParseSingleExample", err)
16858		return
16859	}
16860	if sparse_values, idx, err = makeOutputList(op, idx, "sparse_values"); err != nil {
16861		scope.UpdateErr("ParseSingleExample", err)
16862		return
16863	}
16864	if sparse_shapes, idx, err = makeOutputList(op, idx, "sparse_shapes"); err != nil {
16865		scope.UpdateErr("ParseSingleExample", err)
16866		return
16867	}
16868	if dense_values, idx, err = makeOutputList(op, idx, "dense_values"); err != nil {
16869		scope.UpdateErr("ParseSingleExample", err)
16870		return
16871	}
16872	return sparse_indices, sparse_values, sparse_shapes, dense_values
16873}
16874
16875// WholeFileReaderV2Attr is an optional argument to WholeFileReaderV2.
16876type WholeFileReaderV2Attr func(optionalAttr)
16877
16878// WholeFileReaderV2Container sets the optional container attribute to value.
16879//
16880// value: If non-empty, this reader is placed in the given container.
16881// Otherwise, a default container is used.
16882// If not specified, defaults to ""
16883func WholeFileReaderV2Container(value string) WholeFileReaderV2Attr {
16884	return func(m optionalAttr) {
16885		m["container"] = value
16886	}
16887}
16888
16889// WholeFileReaderV2SharedName sets the optional shared_name attribute to value.
16890//
16891// value: If non-empty, this reader is named in the given bucket
16892// with this shared_name. Otherwise, the node name is used instead.
16893// If not specified, defaults to ""
16894func WholeFileReaderV2SharedName(value string) WholeFileReaderV2Attr {
16895	return func(m optionalAttr) {
16896		m["shared_name"] = value
16897	}
16898}
16899
16900// A Reader that outputs the entire contents of a file as a value.
16901//
16902// To use, enqueue filenames in a Queue.  The output of ReaderRead will
16903// be a filename (key) and the contents of that file (value).
16904//
16905// Returns The handle to reference the Reader.
16906func WholeFileReaderV2(scope *Scope, optional ...WholeFileReaderV2Attr) (reader_handle tf.Output) {
16907	if scope.Err() != nil {
16908		return
16909	}
16910	attrs := map[string]interface{}{}
16911	for _, a := range optional {
16912		a(attrs)
16913	}
16914	opspec := tf.OpSpec{
16915		Type: "WholeFileReaderV2",
16916
16917		Attrs: attrs,
16918	}
16919	op := scope.AddOperation(opspec)
16920	return op.Output(0)
16921}
16922
16923// LoadTPUEmbeddingADAMParametersAttr is an optional argument to LoadTPUEmbeddingADAMParameters.
16924type LoadTPUEmbeddingADAMParametersAttr func(optionalAttr)
16925
16926// LoadTPUEmbeddingADAMParametersTableId sets the optional table_id attribute to value.
16927// If not specified, defaults to -1
16928//
16929// REQUIRES: value >= -1
16930func LoadTPUEmbeddingADAMParametersTableId(value int64) LoadTPUEmbeddingADAMParametersAttr {
16931	return func(m optionalAttr) {
16932		m["table_id"] = value
16933	}
16934}
16935
16936// LoadTPUEmbeddingADAMParametersTableName sets the optional table_name attribute to value.
16937// If not specified, defaults to ""
16938func LoadTPUEmbeddingADAMParametersTableName(value string) LoadTPUEmbeddingADAMParametersAttr {
16939	return func(m optionalAttr) {
16940		m["table_name"] = value
16941	}
16942}
16943
16944// Load ADAM embedding parameters.
16945//
16946// An op that loads optimization parameters into HBM for embedding. Must be
16947// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
16948// embedding table configuration. For example, this op is used to install
16949// parameters that are loaded from a checkpoint before a training loop is
16950// executed.
16951//
16952// Arguments:
16953//	parameters: Value of parameters used in the ADAM optimization algorithm.
16954//	momenta: Value of momenta used in the ADAM optimization algorithm.
16955//	velocities: Value of velocities used in the ADAM optimization algorithm.
16956//
16957//
16958//
16959// Returns the created operation.
16960func LoadTPUEmbeddingADAMParameters(scope *Scope, parameters tf.Output, momenta tf.Output, velocities tf.Output, num_shards int64, shard_id int64, optional ...LoadTPUEmbeddingADAMParametersAttr) (o *tf.Operation) {
16961	if scope.Err() != nil {
16962		return
16963	}
16964	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
16965	for _, a := range optional {
16966		a(attrs)
16967	}
16968	opspec := tf.OpSpec{
16969		Type: "LoadTPUEmbeddingADAMParameters",
16970		Input: []tf.Input{
16971			parameters, momenta, velocities,
16972		},
16973		Attrs: attrs,
16974	}
16975	return scope.AddOperation(opspec)
16976}
16977
16978// InfeedEnqueuePrelinearizedBufferAttr is an optional argument to InfeedEnqueuePrelinearizedBuffer.
16979type InfeedEnqueuePrelinearizedBufferAttr func(optionalAttr)
16980
16981// InfeedEnqueuePrelinearizedBufferDeviceOrdinal sets the optional device_ordinal attribute to value.
16982//
16983// value: The TPU device to use. This should be -1 when the Op is running on a TPU device
// and >= 0 when the Op is running on the CPU device.
16985// If not specified, defaults to -1
16986func InfeedEnqueuePrelinearizedBufferDeviceOrdinal(value int64) InfeedEnqueuePrelinearizedBufferAttr {
16987	return func(m optionalAttr) {
16988		m["device_ordinal"] = value
16989	}
16990}
16991
// An op which enqueues a prelinearized buffer into the TPU infeed.
16993//
16994// Arguments:
16995//	input: A variant tensor representing linearized output.
16996//
16997// Returns the created operation.
16998func InfeedEnqueuePrelinearizedBuffer(scope *Scope, input tf.Output, optional ...InfeedEnqueuePrelinearizedBufferAttr) (o *tf.Operation) {
16999	if scope.Err() != nil {
17000		return
17001	}
17002	attrs := map[string]interface{}{}
17003	for _, a := range optional {
17004		a(attrs)
17005	}
17006	opspec := tf.OpSpec{
17007		Type: "InfeedEnqueuePrelinearizedBuffer",
17008		Input: []tf.Input{
17009			input,
17010		},
17011		Attrs: attrs,
17012	}
17013	return scope.AddOperation(opspec)
17014}
17015
17016// Fetches multiple values from infeed as an XLA tuple.
17017//
17018// Arguments:
17019//	dtypes: The element types of each element in `outputs`.
17020//	shapes: The shapes of each tensor in `outputs`.
17021//
17022// Returns A list of tensors that will be provided using the infeed mechanism.
17023func InfeedDequeueTuple(scope *Scope, dtypes []tf.DataType, shapes []tf.Shape) (outputs []tf.Output) {
17024	if scope.Err() != nil {
17025		return
17026	}
17027	attrs := map[string]interface{}{"dtypes": dtypes, "shapes": shapes}
17028	opspec := tf.OpSpec{
17029		Type: "InfeedDequeueTuple",
17030
17031		Attrs: attrs,
17032	}
17033	op := scope.AddOperation(opspec)
17034	if scope.Err() != nil {
17035		return
17036	}
17037	var idx int
17038	var err error
17039	if outputs, idx, err = makeOutputList(op, idx, "outputs"); err != nil {
17040		scope.UpdateErr("InfeedDequeueTuple", err)
17041		return
17042	}
17043	return outputs
17044}
17045
17046// Enqueue multiple Tensor values on the computation outfeed.
17047//
17048// Arguments:
17049//	inputs: A list of tensors that will be inserted into the outfeed queue as an
17050// XLA tuple.
17051//
17052// Returns the created operation.
17053func OutfeedEnqueueTuple(scope *Scope, inputs []tf.Output) (o *tf.Operation) {
17054	if scope.Err() != nil {
17055		return
17056	}
17057	opspec := tf.OpSpec{
17058		Type: "OutfeedEnqueueTuple",
17059		Input: []tf.Input{
17060			tf.OutputList(inputs),
17061		},
17062	}
17063	return scope.AddOperation(opspec)
17064}
17065
17066// ResourceApplyAdagradAttr is an optional argument to ResourceApplyAdagrad.
17067type ResourceApplyAdagradAttr func(optionalAttr)
17068
17069// ResourceApplyAdagradUseLocking sets the optional use_locking attribute to value.
17070//
17071// value: If `True`, updating of the var and accum tensors will be protected
17072// by a lock; otherwise the behavior is undefined, but may exhibit less
17073// contention.
17074// If not specified, defaults to false
17075func ResourceApplyAdagradUseLocking(value bool) ResourceApplyAdagradAttr {
17076	return func(m optionalAttr) {
17077		m["use_locking"] = value
17078	}
17079}
17080
17081// ResourceApplyAdagradUpdateSlots sets the optional update_slots attribute to value.
17082// If not specified, defaults to true
17083func ResourceApplyAdagradUpdateSlots(value bool) ResourceApplyAdagradAttr {
17084	return func(m optionalAttr) {
17085		m["update_slots"] = value
17086	}
17087}
17088
17089// Update '*var' according to the adagrad scheme.
17090//
17091// accum += grad * grad
17092// var -= lr * grad * (1 / sqrt(accum))
17093//
17094// Arguments:
17095//	var_: Should be from a Variable().
17096//	accum: Should be from a Variable().
17097//	lr: Scaling factor. Must be a scalar.
17098//	grad: The gradient.
17099//
17100// Returns the created operation.
17101func ResourceApplyAdagrad(scope *Scope, var_ tf.Output, accum tf.Output, lr tf.Output, grad tf.Output, optional ...ResourceApplyAdagradAttr) (o *tf.Operation) {
17102	if scope.Err() != nil {
17103		return
17104	}
17105	attrs := map[string]interface{}{}
17106	for _, a := range optional {
17107		a(attrs)
17108	}
17109	opspec := tf.OpSpec{
17110		Type: "ResourceApplyAdagrad",
17111		Input: []tf.Input{
17112			var_, accum, lr, grad,
17113		},
17114		Attrs: attrs,
17115	}
17116	return scope.AddOperation(opspec)
17117}
17118
17119// CudnnRNNV3Attr is an optional argument to CudnnRNNV3.
17120type CudnnRNNV3Attr func(optionalAttr)
17121
17122// CudnnRNNV3RnnMode sets the optional rnn_mode attribute to value.
17123// If not specified, defaults to "lstm"
17124func CudnnRNNV3RnnMode(value string) CudnnRNNV3Attr {
17125	return func(m optionalAttr) {
17126		m["rnn_mode"] = value
17127	}
17128}
17129
17130// CudnnRNNV3InputMode sets the optional input_mode attribute to value.
17131// If not specified, defaults to "linear_input"
17132func CudnnRNNV3InputMode(value string) CudnnRNNV3Attr {
17133	return func(m optionalAttr) {
17134		m["input_mode"] = value
17135	}
17136}
17137
17138// CudnnRNNV3Direction sets the optional direction attribute to value.
17139// If not specified, defaults to "unidirectional"
17140func CudnnRNNV3Direction(value string) CudnnRNNV3Attr {
17141	return func(m optionalAttr) {
17142		m["direction"] = value
17143	}
17144}
17145
17146// CudnnRNNV3Dropout sets the optional dropout attribute to value.
17147// If not specified, defaults to 0
17148func CudnnRNNV3Dropout(value float32) CudnnRNNV3Attr {
17149	return func(m optionalAttr) {
17150		m["dropout"] = value
17151	}
17152}
17153
17154// CudnnRNNV3Seed sets the optional seed attribute to value.
17155// If not specified, defaults to 0
17156func CudnnRNNV3Seed(value int64) CudnnRNNV3Attr {
17157	return func(m optionalAttr) {
17158		m["seed"] = value
17159	}
17160}
17161
17162// CudnnRNNV3Seed2 sets the optional seed2 attribute to value.
17163// If not specified, defaults to 0
17164func CudnnRNNV3Seed2(value int64) CudnnRNNV3Attr {
17165	return func(m optionalAttr) {
17166		m["seed2"] = value
17167	}
17168}
17169
17170// CudnnRNNV3IsTraining sets the optional is_training attribute to value.
17171// If not specified, defaults to true
17172func CudnnRNNV3IsTraining(value bool) CudnnRNNV3Attr {
17173	return func(m optionalAttr) {
17174		m["is_training"] = value
17175	}
17176}
17177
17178// CudnnRNNV3TimeMajor sets the optional time_major attribute to value.
17179// If not specified, defaults to true
17180func CudnnRNNV3TimeMajor(value bool) CudnnRNNV3Attr {
17181	return func(m optionalAttr) {
17182		m["time_major"] = value
17183	}
17184}
17185
// An RNN backed by cuDNN.
17187//
17188// Computes the RNN from the input and initial states, with respect to the params
17189// buffer. Accepts one extra input "sequence_lengths" than CudnnRNN.
17190//
17191// rnn_mode: Indicates the type of the RNN model.
17192// input_mode: Indicates whether there is a linear projection between the input and
17193//   the actual computation before the first layer. 'skip_input' is only allowed
17194//   when input_size == num_units; 'auto_select' implies 'skip_input' when
17195//   input_size == num_units; otherwise, it implies 'linear_input'.
17196// direction: Indicates whether a bidirectional model will be used. Should be
17197//   "unidirectional" or "bidirectional".
17198// dropout: Dropout probability. When set to 0., dropout is disabled.
17199// seed: The 1st part of a seed to initialize dropout.
17200// seed2: The 2nd part of a seed to initialize dropout.
17201// input: If time_major is true, this is a 3-D tensor with the shape of
17202//     [seq_length, batch_size, input_size]. If time_major is false, the shape is
17203//     [batch_size, seq_length, input_size].
17204// input_h: If time_major is true, this is a 3-D tensor with the shape of
17205//     [num_layer * dir, batch_size, num_units]. If time_major is false, the shape
17206//     is [batch_size, num_layer * dir, num_units].
17207// input_c: For LSTM, a 3-D tensor with the shape of
17208//     [num_layer * dir, batch, num_units]. For other models, it is ignored.
17209// params: A 1-D tensor that contains the weights and biases in an opaque layout.
17210//     The size must be created through CudnnRNNParamsSize, and initialized
17211//     separately. Note that they might not be compatible across different
//     generations. So it is a good idea to save and restore them.
17213// sequence_lengths: a vector of lengths of each input sequence.
17214// output: If time_major is true, this is a 3-D tensor with the shape of
17215//     [seq_length, batch_size, dir * num_units]. If time_major is false, the
17216//     shape is [batch_size, seq_length, dir * num_units].
// output_h: The same shape as input_h.
// output_c: The same shape as input_c for LSTM. An empty tensor for other models.
// is_training: Indicates whether this operation is used for inference or
17220//   training.
17221// time_major: Indicates whether the input/output format is time major or batch
17222//     major.
17223// reserve_space: An opaque tensor that can be used in backprop calculation. It
17224//   is only produced if is_training is true.
17225func CudnnRNNV3(scope *Scope, input tf.Output, input_h tf.Output, input_c tf.Output, params tf.Output, sequence_lengths tf.Output, optional ...CudnnRNNV3Attr) (output tf.Output, output_h tf.Output, output_c tf.Output, reserve_space tf.Output, host_reserved tf.Output) {
17226	if scope.Err() != nil {
17227		return
17228	}
17229	attrs := map[string]interface{}{}
17230	for _, a := range optional {
17231		a(attrs)
17232	}
17233	opspec := tf.OpSpec{
17234		Type: "CudnnRNNV3",
17235		Input: []tf.Input{
17236			input, input_h, input_c, params, sequence_lengths,
17237		},
17238		Attrs: attrs,
17239	}
17240	op := scope.AddOperation(opspec)
17241	return op.Output(0), op.Output(1), op.Output(2), op.Output(3), op.Output(4)
17242}
17243
17244// Applies softmax to a batched N-D `SparseTensor`.
17245//
// The inputs represent an N-D SparseTensor with logical shape `[..., B, C]`
17247// (where `N >= 2`), and with indices sorted in the canonical lexicographic order.
17248//
17249// This op is equivalent to applying the normal `tf.nn.softmax()` to each innermost
17250// logical submatrix with shape `[B, C]`, but with the catch that *the implicitly
17251// zero elements do not participate*.  Specifically, the algorithm is equivalent
17252// to the following:
17253//
17254//   (1) Applies `tf.nn.softmax()` to a densified view of each innermost submatrix
17255//       with shape `[B, C]`, along the size-C dimension;
17256//   (2) Masks out the original implicitly-zero locations;
17257//   (3) Renormalizes the remaining elements.
17258//
17259// Hence, the `SparseTensor` result has exactly the same non-zero indices and
17260// shape.
17261//
17262// Arguments:
17263//	sp_indices: 2-D.  `NNZ x R` matrix with the indices of non-empty values in a
17264// SparseTensor, in canonical ordering.
17265//	sp_values: 1-D.  `NNZ` non-empty values corresponding to `sp_indices`.
17266//	sp_shape: 1-D.  Shape of the input SparseTensor.
17267//
17268// Returns 1-D.  The `NNZ` values for the result `SparseTensor`.
17269func SparseSoftmax(scope *Scope, sp_indices tf.Output, sp_values tf.Output, sp_shape tf.Output) (output tf.Output) {
17270	if scope.Err() != nil {
17271		return
17272	}
17273	opspec := tf.OpSpec{
17274		Type: "SparseSoftmax",
17275		Input: []tf.Input{
17276			sp_indices, sp_values, sp_shape,
17277		},
17278	}
17279	op := scope.AddOperation(opspec)
17280	return op.Output(0)
17281}
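
// Example: a sketch over a single 2x2 logical matrix with one implicitly zero
// cell at [1, 0].
//
// ```go
// s := NewScope()
// indices := Const(s, [][]int64{{0, 0}, {0, 1}, {1, 1}})
// values := Const(s, []float32{1, 2, 3})
// shape := Const(s, []int64{2, 2})
// soft := SparseSoftmax(s, indices, values, shape)
// _ = soft // row 0 softmaxes over {1, 2}; row 1's single value maps to 1.0
// ```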
17282
17283// Creates a Tensor by indexing into the TensorList.
17284//
17285// Each row in the produced Tensor corresponds to the element in the TensorList
17286// specified by the given index (see `tf.gather`).
17287//
17288// input_handle: The input tensor list.
17289// indices: The indices used to index into the list.
17290// values: The tensor.
17291func TensorListGather(scope *Scope, input_handle tf.Output, indices tf.Output, element_shape tf.Output, element_dtype tf.DataType) (values tf.Output) {
17292	if scope.Err() != nil {
17293		return
17294	}
17295	attrs := map[string]interface{}{"element_dtype": element_dtype}
17296	opspec := tf.OpSpec{
17297		Type: "TensorListGather",
17298		Input: []tf.Input{
17299			input_handle, indices, element_shape,
17300		},
17301		Attrs: attrs,
17302	}
17303	op := scope.AddOperation(opspec)
17304	return op.Output(0)
17305}
17306
17307// FixedLengthRecordReaderV2Attr is an optional argument to FixedLengthRecordReaderV2.
17308type FixedLengthRecordReaderV2Attr func(optionalAttr)
17309
17310// FixedLengthRecordReaderV2HeaderBytes sets the optional header_bytes attribute to value.
17311//
17312// value: Number of bytes in the header, defaults to 0.
17313// If not specified, defaults to 0
17314func FixedLengthRecordReaderV2HeaderBytes(value int64) FixedLengthRecordReaderV2Attr {
17315	return func(m optionalAttr) {
17316		m["header_bytes"] = value
17317	}
17318}
17319
17320// FixedLengthRecordReaderV2FooterBytes sets the optional footer_bytes attribute to value.
17321//
17322// value: Number of bytes in the footer, defaults to 0.
17323// If not specified, defaults to 0
17324func FixedLengthRecordReaderV2FooterBytes(value int64) FixedLengthRecordReaderV2Attr {
17325	return func(m optionalAttr) {
17326		m["footer_bytes"] = value
17327	}
17328}
17329
17330// FixedLengthRecordReaderV2HopBytes sets the optional hop_bytes attribute to value.
17331//
17332// value: Number of bytes to hop before each read. Default of 0 means using
17333// record_bytes.
17334// If not specified, defaults to 0
17335func FixedLengthRecordReaderV2HopBytes(value int64) FixedLengthRecordReaderV2Attr {
17336	return func(m optionalAttr) {
17337		m["hop_bytes"] = value
17338	}
17339}
17340
17341// FixedLengthRecordReaderV2Container sets the optional container attribute to value.
17342//
17343// value: If non-empty, this reader is placed in the given container.
17344// Otherwise, a default container is used.
17345// If not specified, defaults to ""
17346func FixedLengthRecordReaderV2Container(value string) FixedLengthRecordReaderV2Attr {
17347	return func(m optionalAttr) {
17348		m["container"] = value
17349	}
17350}
17351
17352// FixedLengthRecordReaderV2SharedName sets the optional shared_name attribute to value.
17353//
17354// value: If non-empty, this reader is named in the given bucket
17355// with this shared_name. Otherwise, the node name is used instead.
17356// If not specified, defaults to ""
17357func FixedLengthRecordReaderV2SharedName(value string) FixedLengthRecordReaderV2Attr {
17358	return func(m optionalAttr) {
17359		m["shared_name"] = value
17360	}
17361}
17362
17363// FixedLengthRecordReaderV2Encoding sets the optional encoding attribute to value.
17364//
17365// value: The type of encoding for the file. Currently ZLIB and GZIP
17366// are supported. Defaults to none.
17367// If not specified, defaults to ""
17368func FixedLengthRecordReaderV2Encoding(value string) FixedLengthRecordReaderV2Attr {
17369	return func(m optionalAttr) {
17370		m["encoding"] = value
17371	}
17372}
17373
17374// A Reader that outputs fixed-length records from a file.
17375//
17376// Arguments:
17377//	record_bytes: Number of bytes in the record.
17378//
17379// Returns The handle to reference the Reader.
17380func FixedLengthRecordReaderV2(scope *Scope, record_bytes int64, optional ...FixedLengthRecordReaderV2Attr) (reader_handle tf.Output) {
17381	if scope.Err() != nil {
17382		return
17383	}
17384	attrs := map[string]interface{}{"record_bytes": record_bytes}
17385	for _, a := range optional {
17386		a(attrs)
17387	}
17388	opspec := tf.OpSpec{
17389		Type: "FixedLengthRecordReaderV2",
17390
17391		Attrs: attrs,
17392	}
17393	op := scope.AddOperation(opspec)
17394	return op.Output(0)
17395}
17396
17397// ResourceApplyPowerSignAttr is an optional argument to ResourceApplyPowerSign.
17398type ResourceApplyPowerSignAttr func(optionalAttr)
17399
17400// ResourceApplyPowerSignUseLocking sets the optional use_locking attribute to value.
17401//
17402// value: If `True`, updating of the var and m tensors is
17403// protected by a lock; otherwise the behavior is undefined, but may exhibit less
17404// contention.
17405// If not specified, defaults to false
17406func ResourceApplyPowerSignUseLocking(value bool) ResourceApplyPowerSignAttr {
17407	return func(m optionalAttr) {
17408		m["use_locking"] = value
17409	}
17410}
17411
// Update '*var' according to the PowerSign update.
17413//
17414// m_t <- beta1 * m_{t-1} + (1 - beta1) * g
17415// update <- exp(logbase * sign_decay * sign(g) * sign(m_t)) * g
17416// variable <- variable - lr_t * update
17417//
17418// Arguments:
17419//	var_: Should be from a Variable().
17420//	m: Should be from a Variable().
17421//	lr: Scaling factor. Must be a scalar.
17422//	logbase: Must be a scalar.
17423//	sign_decay: Must be a scalar.
17424//	beta: Must be a scalar.
17425//	grad: The gradient.
17426//
17427// Returns the created operation.
17428func ResourceApplyPowerSign(scope *Scope, var_ tf.Output, m tf.Output, lr tf.Output, logbase tf.Output, sign_decay tf.Output, beta tf.Output, grad tf.Output, optional ...ResourceApplyPowerSignAttr) (o *tf.Operation) {
17429	if scope.Err() != nil {
17430		return
17431	}
17432	attrs := map[string]interface{}{}
17433	for _, a := range optional {
17434		a(attrs)
17435	}
17436	opspec := tf.OpSpec{
17437		Type: "ResourceApplyPowerSign",
17438		Input: []tf.Input{
17439			var_, m, lr, logbase, sign_decay, beta, grad,
17440		},
17441		Attrs: attrs,
17442	}
17443	return scope.AddOperation(opspec)
17444}
17445
17446// Connects outputs of an N-way replicated computation to N outputs.
17447func TPUReplicatedOutput(scope *Scope, input tf.Output, num_replicas int64) (outputs []tf.Output) {
17448	if scope.Err() != nil {
17449		return
17450	}
17451	attrs := map[string]interface{}{"num_replicas": num_replicas}
17452	opspec := tf.OpSpec{
17453		Type: "TPUReplicatedOutput",
17454		Input: []tf.Input{
17455			input,
17456		},
17457		Attrs: attrs,
17458	}
17459	op := scope.AddOperation(opspec)
17460	if scope.Err() != nil {
17461		return
17462	}
17463	var idx int
17464	var err error
17465	if outputs, idx, err = makeOutputList(op, idx, "outputs"); err != nil {
17466		scope.UpdateErr("TPUReplicatedOutput", err)
17467		return
17468	}
17469	return outputs
17470}
17471
17472// LoadTPUEmbeddingFTRLParametersAttr is an optional argument to LoadTPUEmbeddingFTRLParameters.
17473type LoadTPUEmbeddingFTRLParametersAttr func(optionalAttr)
17474
17475// LoadTPUEmbeddingFTRLParametersTableId sets the optional table_id attribute to value.
17476// If not specified, defaults to -1
17477//
17478// REQUIRES: value >= -1
17479func LoadTPUEmbeddingFTRLParametersTableId(value int64) LoadTPUEmbeddingFTRLParametersAttr {
17480	return func(m optionalAttr) {
17481		m["table_id"] = value
17482	}
17483}
17484
17485// LoadTPUEmbeddingFTRLParametersTableName sets the optional table_name attribute to value.
17486// If not specified, defaults to ""
17487func LoadTPUEmbeddingFTRLParametersTableName(value string) LoadTPUEmbeddingFTRLParametersAttr {
17488	return func(m optionalAttr) {
17489		m["table_name"] = value
17490	}
17491}
17492
17493// Load FTRL embedding parameters.
17494//
17495// An op that loads optimization parameters into HBM for embedding. Must be
17496// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
17497// embedding table configuration. For example, this op is used to install
17498// parameters that are loaded from a checkpoint before a training loop is
17499// executed.
17500//
17501// Arguments:
17502//	parameters: Value of parameters used in the FTRL optimization algorithm.
17503//	accumulators: Value of accumulators used in the FTRL optimization algorithm.
17504//	linears: Value of linears used in the FTRL optimization algorithm.
17505//
17506//
17507//
17508// Returns the created operation.
17509func LoadTPUEmbeddingFTRLParameters(scope *Scope, parameters tf.Output, accumulators tf.Output, linears tf.Output, num_shards int64, shard_id int64, optional ...LoadTPUEmbeddingFTRLParametersAttr) (o *tf.Operation) {
17510	if scope.Err() != nil {
17511		return
17512	}
17513	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
17514	for _, a := range optional {
17515		a(attrs)
17516	}
17517	opspec := tf.OpSpec{
17518		Type: "LoadTPUEmbeddingFTRLParameters",
17519		Input: []tf.Input{
17520			parameters, accumulators, linears,
17521		},
17522		Attrs: attrs,
17523	}
17524	return scope.AddOperation(opspec)
17525}
17526
17527// Returns (x - y)(x - y) element-wise.
17528//
17529// *NOTE*: `SquaredDifference` supports broadcasting. More about broadcasting
17530// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
17531func SquaredDifference(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
17532	if scope.Err() != nil {
17533		return
17534	}
17535	opspec := tf.OpSpec{
17536		Type: "SquaredDifference",
17537		Input: []tf.Input{
17538			x, y,
17539		},
17540	}
17541	op := scope.AddOperation(opspec)
17542	return op.Output(0)
17543}
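
// Example: a sketch of the broadcasting behavior noted above.
//
// ```go
// s := NewScope()
// x := Const(s, []float32{1, 2, 3})
// y := Const(s, float32(2))       // the scalar broadcasts against the vector
// z := SquaredDifference(s, x, y) // => [1, 0, 1]
// _ = z
// ```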
17544
17545// Push an element onto the tensor_array.
17546//
17547// Arguments:
17548//	handle: The handle to a TensorArray.
17549//	index: The position to write to inside the TensorArray.
17550//	value: The tensor to write to the TensorArray.
17551//	flow_in: A float scalar that enforces proper chaining of operations.
17552//
17553// Returns A float scalar that enforces proper chaining of operations.
17554func TensorArrayWriteV3(scope *Scope, handle tf.Output, index tf.Output, value tf.Output, flow_in tf.Output) (flow_out tf.Output) {
17555	if scope.Err() != nil {
17556		return
17557	}
17558	opspec := tf.OpSpec{
17559		Type: "TensorArrayWriteV3",
17560		Input: []tf.Input{
17561			handle, index, value, flow_in,
17562		},
17563	}
17564	op := scope.AddOperation(opspec)
17565	return op.Output(0)
17566}
17567
17568// RetrieveTPUEmbeddingAdagradParametersAttr is an optional argument to RetrieveTPUEmbeddingAdagradParameters.
17569type RetrieveTPUEmbeddingAdagradParametersAttr func(optionalAttr)
17570
17571// RetrieveTPUEmbeddingAdagradParametersTableId sets the optional table_id attribute to value.
17572// If not specified, defaults to -1
17573//
17574// REQUIRES: value >= -1
17575func RetrieveTPUEmbeddingAdagradParametersTableId(value int64) RetrieveTPUEmbeddingAdagradParametersAttr {
17576	return func(m optionalAttr) {
17577		m["table_id"] = value
17578	}
17579}
17580
17581// RetrieveTPUEmbeddingAdagradParametersTableName sets the optional table_name attribute to value.
17582// If not specified, defaults to ""
17583func RetrieveTPUEmbeddingAdagradParametersTableName(value string) RetrieveTPUEmbeddingAdagradParametersAttr {
17584	return func(m optionalAttr) {
17585		m["table_name"] = value
17586	}
17587}
17588
17589// Retrieve Adagrad embedding parameters.
17590//
17591// An op that retrieves optimization parameters from embedding to host
17592// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
17593// the correct embedding table configuration. For example, this op is
17594// used to retrieve updated parameters before saving a checkpoint.
17595//
// Returns:
//	parameters: Parameters updated by the Adagrad optimization algorithm.
//	accumulators: Accumulators updated by the Adagrad optimization algorithm.
17597func RetrieveTPUEmbeddingAdagradParameters(scope *Scope, num_shards int64, shard_id int64, optional ...RetrieveTPUEmbeddingAdagradParametersAttr) (parameters tf.Output, accumulators tf.Output) {
17598	if scope.Err() != nil {
17599		return
17600	}
17601	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
17602	for _, a := range optional {
17603		a(attrs)
17604	}
17605	opspec := tf.OpSpec{
17606		Type: "RetrieveTPUEmbeddingAdagradParameters",
17607
17608		Attrs: attrs,
17609	}
17610	op := scope.AddOperation(opspec)
17611	return op.Output(0), op.Output(1)
17612}
17613
17614// Compare values of `input` to `threshold` and pack resulting bits into a `uint8`.
17615//
// Each comparison returns a boolean `true` (if `input_value > threshold`)
// and `false` otherwise.
17618//
17619// This operation is useful for Locality-Sensitive-Hashing (LSH) and other
17620// algorithms that use hashing approximations of cosine and `L2` distances;
17621// codes can be generated from an input via:
17622//
17623// ```python
17624// codebook_size = 50
17625// codebook_bits = codebook_size * 32
17626// codebook = tf.get_variable('codebook', [x.shape[-1].value, codebook_bits],
17627//                            dtype=x.dtype,
17628//                            initializer=tf.orthogonal_initializer())
17629// codes = compare_and_threshold(tf.matmul(x, codebook), threshold=0.)
17630// codes = tf.bitcast(codes, tf.int32)  # go from uint8 to int32
17631// # now codes has shape x.shape[:-1] + [codebook_size]
17632// ```
17633//
17634// **NOTE**: Currently, the innermost dimension of the tensor must be divisible
17635// by 8.
17636//
17637// Given an `input` shaped `[s0, s1, ..., s_n]`, the output is
17638// a `uint8` tensor shaped `[s0, s1, ..., s_n / 8]`.
17639//
17640// Arguments:
17641//	input: Values to compare against `threshold` and bitpack.
17642//	threshold: Threshold to compare against.
17643//
17644// Returns The bitpacked comparisons.
17645func CompareAndBitpack(scope *Scope, input tf.Output, threshold tf.Output) (output tf.Output) {
17646	if scope.Err() != nil {
17647		return
17648	}
17649	opspec := tf.OpSpec{
17650		Type: "CompareAndBitpack",
17651		Input: []tf.Input{
17652			input, threshold,
17653		},
17654	}
17655	op := scope.AddOperation(opspec)
17656	return op.Output(0)
17657}
17658
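// Editor's note: a brief construction sketch (same assumptions as the
// SquaredDifference example above). The exact bit layout of the packed bytes
// is determined by the kernel; the sketch only shows the shapes involved.
//
// ```go
// s := op.NewScope()
// in := op.Const(s, []float32{-1, 1, -1, 1, 1, 1, -1, -1}) // shape [8]
// th := op.Const(s, float32(0))
// packed := op.CompareAndBitpack(s, in, th) // uint8 tensor of shape [1]
// _ = packed
// ```
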
// QuantizeAndDequantizeV2Attr is an optional argument to QuantizeAndDequantizeV2.
type QuantizeAndDequantizeV2Attr func(optionalAttr)

// QuantizeAndDequantizeV2SignedInput sets the optional signed_input attribute to value.
//
// value: Whether the quantization is signed or unsigned. (This parameter would
// have been better named `signed_output`.)
// If not specified, defaults to true
func QuantizeAndDequantizeV2SignedInput(value bool) QuantizeAndDequantizeV2Attr {
	return func(m optionalAttr) {
		m["signed_input"] = value
	}
}

// QuantizeAndDequantizeV2NumBits sets the optional num_bits attribute to value.
//
// value: The bitwidth of the quantization.
// If not specified, defaults to 8
func QuantizeAndDequantizeV2NumBits(value int64) QuantizeAndDequantizeV2Attr {
	return func(m optionalAttr) {
		m["num_bits"] = value
	}
}

// QuantizeAndDequantizeV2RangeGiven sets the optional range_given attribute to value.
//
// value: Whether the range is given or should be determined from the `input` tensor.
// If not specified, defaults to false
func QuantizeAndDequantizeV2RangeGiven(value bool) QuantizeAndDequantizeV2Attr {
	return func(m optionalAttr) {
		m["range_given"] = value
	}
}

// QuantizeAndDequantizeV2RoundMode sets the optional round_mode attribute to value.
//
// value: The 'round_mode' attribute controls which rounding tie-breaking algorithm is
// used when rounding float values to their quantized equivalents. The following
// rounding modes are currently supported:
//
// *   HALF_TO_EVEN: this is the default round_mode.
// *   HALF_UP: round towards positive infinity. In this mode 7.5 rounds up to 8
//     and -7.5 rounds up to -7.
//
// If not specified, defaults to "HALF_TO_EVEN"
func QuantizeAndDequantizeV2RoundMode(value string) QuantizeAndDequantizeV2Attr {
	return func(m optionalAttr) {
		m["round_mode"] = value
	}
}

// Quantizes then dequantizes a tensor.
//
// This op simulates the precision loss from the quantized forward pass by:
//
// 1. Quantizing the tensor to fixed point numbers, which should match the target
//    quantization method when it is used in inference.
// 2. Dequantizing it back to floating point numbers for the following ops, most
//    likely matmul.
//
// There are different ways to quantize. This version uses only scaling, so 0.0
// maps to 0.
//
// From the specified 'num_bits' in the quantized output type, it determines
// minimum and maximum representable quantized values.
//
// e.g.
//
// *   [-128, 127] for signed, num_bits = 8, or
// *   [0, 255] for unsigned, num_bits = 8.
//
// If range_given == False, the initial input_min, input_max will be determined
// automatically as the minimum and maximum values in the input tensor, otherwise
// the specified values of input_min, input_max are used.
//
// Note: If the input_min, input_max are specified, they do not need to equal the
// actual minimum and maximum values in the tensor. For example, in some cases it
// may be beneficial to specify these values such that the low probability extremes
// of the input distribution are clipped.
//
// This op determines the maximum scale_factor that would map the initial
// [input_min, input_max] range to a range that lies within the representable
// quantized range.
//
// It determines the scale from one of input_min and input_max, then updates the
// other one to maximize the representable range.
//
// e.g.
//
// *   if the output is signed, num_bits = 8, [input_min, input_max] = [-10.0,
//     5.0]: it would use a scale_factor of -128 / -10.0 = 12.8. In this case, it
//     would update input_max to be 127 / 12.8 = 9.921875.
// *   if the output is signed, num_bits = 8, [input_min, input_max] = [-10.0,
//     10.0]: it would use a scale_factor of 127 / 10.0 = 12.7. In this case, it
//     would update input_min to be -128.0 / 12.7 = -10.07874.
// *   if the output is unsigned, input_min is forced to be 0, and only the
//     specified input_max is used.
//
// After determining the scale_factor and updating the input range, it applies the
// following to each value in the 'input' tensor.
//
// output = round(clamp(value, input_min, input_max) * scale_factor) / scale_factor.
//
// The above round function rounds the value based on the given round_mode.
//
//
// Arguments:
//	input: Tensor to quantize and then dequantize.
//	input_min: If `range_given == True`, this specifies the minimum input value that needs to
// be represented, otherwise it is determined from the min value of the `input`
// tensor.
//	input_max: If `range_given == True`, this specifies the maximum input value that needs to
// be represented, otherwise it is determined from the max value of the `input`
// tensor.
func QuantizeAndDequantizeV2(scope *Scope, input tf.Output, input_min tf.Output, input_max tf.Output, optional ...QuantizeAndDequantizeV2Attr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "QuantizeAndDequantizeV2",
		Input: []tf.Input{
			input, input_min, input_max,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

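// Editor's note: a hedged construction sketch for the wrapper above, using
// the worked signed 8-bit range from the comment (assumptions as in the
// earlier examples).
//
// ```go
// s := op.NewScope()
// in := op.Const(s, []float32{-10, -3.5, 0, 4.2, 5})
// min := op.Const(s, float32(-10))
// max := op.Const(s, float32(5))
// out := op.QuantizeAndDequantizeV2(s, in, min, max,
// 	op.QuantizeAndDequantizeV2NumBits(8),
// 	op.QuantizeAndDequantizeV2RangeGiven(true))
// _ = out // each value is snapped to the nearest representable level
// ```
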
// A TPU core selector Op.
//
// This Op produces a set of TPU cores (for warm-up) or a single TPU core
// (for regular inference) to execute the TPU program on. The output is
// consumed by TPUPartitionedCall.
//
// Returns A vector of 1 or more TPU cores.
func TPUOrdinalSelector(scope *Scope) (device_ordinals tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "TPUOrdinalSelector",
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Looks up keys in a table, outputs the corresponding values.
//
// The tensor `keys` must be of the same type as the keys of the table.
// The output `values` is of the type of the table values.
//
// The scalar `default_value` is the value output for keys not present in the
// table. It must also be of the same type as the table values.
//
// Arguments:
//	table_handle: Handle to the table.
//	keys: Any shape.  Keys to look up.
//	default_value: A scalar of the same type as the table values; output for keys
// not present in the table.
//
// Returns Same shape as `keys`.  Values found in the table, or `default_value`
// for missing keys.
func LookupTableFindV2(scope *Scope, table_handle tf.Output, keys tf.Output, default_value tf.Output) (values tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "LookupTableFindV2",
		Input: []tf.Input{
			table_handle, keys, default_value,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// ResourceSparseApplyRMSPropAttr is an optional argument to ResourceSparseApplyRMSProp.
type ResourceSparseApplyRMSPropAttr func(optionalAttr)

// ResourceSparseApplyRMSPropUseLocking sets the optional use_locking attribute to value.
//
// value: If `True`, updating of the var, ms, and mom tensors is protected
// by a lock; otherwise the behavior is undefined, but may exhibit less
// contention.
// If not specified, defaults to false
func ResourceSparseApplyRMSPropUseLocking(value bool) ResourceSparseApplyRMSPropAttr {
	return func(m optionalAttr) {
		m["use_locking"] = value
	}
}

// Update '*var' according to the RMSProp algorithm.
//
// Note that in the dense implementation of this algorithm, ms and mom will
// update even if the grad is zero, but in this sparse implementation, ms
// and mom will not update in iterations during which the grad is zero.
//
// mean_square = decay * mean_square + (1-decay) * gradient ** 2
// Delta = learning_rate * gradient / sqrt(mean_square + epsilon)
//
// ms <- rho * ms_{t-1} + (1-rho) * grad * grad
// mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)
// var <- var - mom
//
// Arguments:
//	var_: Should be from a Variable().
//	ms: Should be from a Variable().
//	mom: Should be from a Variable().
//	lr: Scaling factor. Must be a scalar.
//	rho: Decay rate. Must be a scalar.
//	momentum: Momentum scale. Must be a scalar.
//	epsilon: Ridge term. Must be a scalar.
//	grad: The gradient.
//	indices: A vector of indices into the first dimension of var, ms and mom.
//
// Returns the created operation.
func ResourceSparseApplyRMSProp(scope *Scope, var_ tf.Output, ms tf.Output, mom tf.Output, lr tf.Output, rho tf.Output, momentum tf.Output, epsilon tf.Output, grad tf.Output, indices tf.Output, optional ...ResourceSparseApplyRMSPropAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ResourceSparseApplyRMSProp",
		Input: []tf.Input{
			var_, ms, mom, lr, rho, momentum, epsilon, grad, indices,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}

// Returns the truth value of (x > y) element-wise.
//
// *NOTE*: `Greater` supports broadcasting. More about broadcasting
// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
func Greater(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Greater",
		Input: []tf.Input{
			x, y,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Creates a TensorList by indexing into a Tensor.
//
// Each member of the TensorList corresponds to one row of the input tensor,
// specified by the given index (see `tf.gather`).
//
// tensor: The input tensor.
// indices: The indices used to index into the list.
// element_shape: The shape of the elements in the list (can be less specified than
//   the shape of the tensor).
// num_elements: The size of the output list. Must be large enough to accommodate
//   the largest index in indices. If -1, the list is just large enough to include
//   the largest index in indices.
// output_handle: The TensorList.
func TensorListScatterV2(scope *Scope, tensor tf.Output, indices tf.Output, element_shape tf.Output, num_elements tf.Output) (output_handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "TensorListScatterV2",
		Input: []tf.Input{
			tensor, indices, element_shape, num_elements,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// SampleDistortedBoundingBoxAttr is an optional argument to SampleDistortedBoundingBox.
type SampleDistortedBoundingBoxAttr func(optionalAttr)

// SampleDistortedBoundingBoxSeed sets the optional seed attribute to value.
//
// value: If either `seed` or `seed2` are set to non-zero, the random number
// generator is seeded by the given `seed`.  Otherwise, it is seeded by a random
// seed.
// If not specified, defaults to 0
func SampleDistortedBoundingBoxSeed(value int64) SampleDistortedBoundingBoxAttr {
	return func(m optionalAttr) {
		m["seed"] = value
	}
}

// SampleDistortedBoundingBoxSeed2 sets the optional seed2 attribute to value.
//
// value: A second seed to avoid seed collision.
// If not specified, defaults to 0
func SampleDistortedBoundingBoxSeed2(value int64) SampleDistortedBoundingBoxAttr {
	return func(m optionalAttr) {
		m["seed2"] = value
	}
}

// SampleDistortedBoundingBoxMinObjectCovered sets the optional min_object_covered attribute to value.
//
// value: The cropped area of the image must contain at least this
// fraction of any bounding box supplied. The value of this parameter should be
// non-negative. In the case of 0, the cropped area does not need to overlap
// any of the bounding boxes supplied.
// If not specified, defaults to 0.1
func SampleDistortedBoundingBoxMinObjectCovered(value float32) SampleDistortedBoundingBoxAttr {
	return func(m optionalAttr) {
		m["min_object_covered"] = value
	}
}

// SampleDistortedBoundingBoxAspectRatioRange sets the optional aspect_ratio_range attribute to value.
//
// value: The cropped area of the image must have an aspect ratio =
// width / height within this range.
// If not specified, defaults to [0.75, 1.33]
func SampleDistortedBoundingBoxAspectRatioRange(value []float32) SampleDistortedBoundingBoxAttr {
	return func(m optionalAttr) {
		m["aspect_ratio_range"] = value
	}
}

// SampleDistortedBoundingBoxAreaRange sets the optional area_range attribute to value.
//
// value: The cropped area of the image must contain a fraction of the
// supplied image within this range.
// If not specified, defaults to [0.05, 1]
func SampleDistortedBoundingBoxAreaRange(value []float32) SampleDistortedBoundingBoxAttr {
	return func(m optionalAttr) {
		m["area_range"] = value
	}
}

// SampleDistortedBoundingBoxMaxAttempts sets the optional max_attempts attribute to value.
//
// value: Number of attempts at generating a cropped region of the image
// satisfying the specified constraints. After `max_attempts` failures, return
// the entire image.
// If not specified, defaults to 100
func SampleDistortedBoundingBoxMaxAttempts(value int64) SampleDistortedBoundingBoxAttr {
	return func(m optionalAttr) {
		m["max_attempts"] = value
	}
}

// SampleDistortedBoundingBoxUseImageIfNoBoundingBoxes sets the optional use_image_if_no_bounding_boxes attribute to value.
//
// value: Controls behavior if no bounding boxes supplied.
// If true, assume an implicit bounding box covering the whole input. If false,
// raise an error.
// If not specified, defaults to false
func SampleDistortedBoundingBoxUseImageIfNoBoundingBoxes(value bool) SampleDistortedBoundingBoxAttr {
	return func(m optionalAttr) {
		m["use_image_if_no_bounding_boxes"] = value
	}
}

// Generate a single randomly distorted bounding box for an image.
//
// Bounding box annotations are often supplied in addition to ground-truth labels
// in image recognition or object localization tasks. A common technique for
// training such a system is to randomly distort an image while preserving
// its content, i.e. *data augmentation*. This Op outputs a randomly distorted
// localization of an object, i.e. bounding box, given an `image_size`,
// `bounding_boxes` and a series of constraints.
//
// The output of this Op is a single bounding box that may be used to crop the
// original image. The output is returned as 3 tensors: `begin`, `size` and
// `bboxes`. The first 2 tensors can be fed directly into `tf.slice` to crop the
// image. The latter may be supplied to `tf.image.draw_bounding_boxes` to visualize
// what the bounding box looks like.
//
// Bounding boxes are supplied and returned as `[y_min, x_min, y_max, x_max]`. The
// bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and
// height of the underlying image.
//
// For example,
//
// ```python
//     # Generate a single distorted bounding box.
//     begin, size, bbox_for_draw = tf.image.sample_distorted_bounding_box(
//         tf.shape(image),
//         bounding_boxes=bounding_boxes)
//
//     # Draw the bounding box in an image summary.
//     image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0),
//                                                   bbox_for_draw)
//     tf.summary.image('images_with_box', image_with_box)
//
//     # Employ the bounding box to distort the image.
//     distorted_image = tf.slice(image, begin, size)
// ```
//
// Note that if no bounding box information is available, setting
// `use_image_if_no_bounding_boxes = true` will assume there is a single implicit
// bounding box covering the whole image. If `use_image_if_no_bounding_boxes` is
// false and no bounding boxes are supplied, an error is raised.
//
// Arguments:
//	image_size: 1-D, containing `[height, width, channels]`.
//	bounding_boxes: 3-D with shape `[batch, N, 4]` describing the N bounding boxes
// associated with the image.
//
// Returns:
//	begin: 1-D, containing `[offset_height, offset_width, 0]`. Provide as input
// to `tf.slice`.
//	size: 1-D, containing `[target_height, target_width, -1]`. Provide as input
// to `tf.slice`.
//	bboxes: 3-D with shape `[1, 1, 4]` containing the distorted bounding box.
// Provide as input to `tf.image.draw_bounding_boxes`.
func SampleDistortedBoundingBox(scope *Scope, image_size tf.Output, bounding_boxes tf.Output, optional ...SampleDistortedBoundingBoxAttr) (begin tf.Output, size tf.Output, bboxes tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "SampleDistortedBoundingBox",
		Input: []tf.Input{
			image_size, bounding_boxes,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}

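// Editor's note: a hedged sketch of the crop flow from the Python example
// above, expressed with the wrappers in this package. `image` and `boxes`
// stand in for outputs built elsewhere in the graph (e.g. a decoded JPEG and
// its ground-truth boxes) and are assumptions.
//
// ```go
// s := op.NewScope()
// // image: 3-D image tensor; boxes: [batch, N, 4] float32 in [0, 1].
// begin, size, bbox := op.SampleDistortedBoundingBox(s, op.Shape(s, image), boxes,
// 	op.SampleDistortedBoundingBoxMinObjectCovered(0.1))
// distorted := op.Slice(s, image, begin, size) // crop the sampled box
// _, _ = distorted, bbox // bbox could be fed to a drawing op for visualization
// ```
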
// ResourceScatterNdUpdateAttr is an optional argument to ResourceScatterNdUpdate.
type ResourceScatterNdUpdateAttr func(optionalAttr)

// ResourceScatterNdUpdateUseLocking sets the optional use_locking attribute to value.
//
// value: An optional bool. Defaults to True. If True, the assignment will
// be protected by a lock; otherwise the behavior is undefined,
// but may exhibit less contention.
// If not specified, defaults to true
func ResourceScatterNdUpdateUseLocking(value bool) ResourceScatterNdUpdateAttr {
	return func(m optionalAttr) {
		m["use_locking"] = value
	}
}

// Applies sparse `updates` to individual values or slices within a given
// variable according to `indices`.
//
// `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
//
// `indices` must be an integer tensor, containing indices into `ref`.
// It must have shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
//
// The innermost dimension of `indices` (with length `K`) corresponds to
// indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
// dimension of `ref`.
//
// `updates` is a `Tensor` of rank `Q-1+P-K` with shape:
//
// ```
// [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
// ```
//
// For example, say we want to update 4 scattered elements in a rank-1 tensor
// with 8 elements. In Python, that update would look like this:
//
// ```python
//     ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
//     indices = tf.constant([[4], [3], [1], [7]])
//     updates = tf.constant([9, 10, 11, 12])
//     update = tf.scatter_nd_update(ref, indices, updates)
//     with tf.Session() as sess:
//       print(sess.run(update))
// ```
//
// The resulting update to ref would look like this:
//
//     [1, 11, 3, 10, 9, 6, 7, 12]
//
// See `tf.scatter_nd` for more details about how to make updates to
// slices.
//
// Arguments:
//	ref: A resource handle. Must be from a VarHandleOp.
//	indices: A Tensor. Must be one of the following types: int32, int64.
// A tensor of indices into ref.
//	updates: A Tensor. Must have the same type as ref. A tensor of updated
// values to add to ref.
//
// Returns the created operation.
func ResourceScatterNdUpdate(scope *Scope, ref tf.Output, indices tf.Output, updates tf.Output, optional ...ResourceScatterNdUpdateAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ResourceScatterNdUpdate",
		Input: []tf.Input{
			ref, indices, updates,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}

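// Editor's note: a hedged resource-variable sketch mirroring the Python
// example above. It assumes the VarHandleOp and AssignVariableOp wrappers
// defined elsewhere in this package; the values are illustrative.
//
// ```go
// s := op.NewScope()
// ref := op.VarHandleOp(s, tf.Int32, tf.MakeShape(8))
// init := op.AssignVariableOp(s, ref, op.Const(s, []int32{1, 2, 3, 4, 5, 6, 7, 8}))
// indices := op.Const(s, [][]int32{{4}, {3}, {1}, {7}})
// updates := op.Const(s, []int32{9, 10, 11, 12})
// upd := op.ResourceScatterNdUpdate(s, ref, indices, updates)
// // Run `init` as a session target before `upd`; reading the variable
// // afterwards yields [1, 11, 3, 10, 9, 6, 7, 12].
// _ = upd
// ```
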
// UnicodeDecodeWithOffsetsAttr is an optional argument to UnicodeDecodeWithOffsets.
type UnicodeDecodeWithOffsetsAttr func(optionalAttr)

// UnicodeDecodeWithOffsetsErrors sets the optional errors attribute to value.
//
// value: Error handling policy when there is invalid formatting found in the input.
// The value of 'strict' will cause the operation to produce an `InvalidArgument`
// error on any invalid input formatting. A value of 'replace' (the default) will
// cause the operation to replace any invalid formatting in the input with the
// `replacement_char` codepoint. A value of 'ignore' will cause the operation to
// skip any invalid formatting in the input and produce no corresponding output
// character.
// If not specified, defaults to "replace"
func UnicodeDecodeWithOffsetsErrors(value string) UnicodeDecodeWithOffsetsAttr {
	return func(m optionalAttr) {
		m["errors"] = value
	}
}

// UnicodeDecodeWithOffsetsReplacementChar sets the optional replacement_char attribute to value.
//
// value: The replacement character codepoint to be used in place of any invalid
// formatting in the input when `errors='replace'`. Any valid Unicode codepoint may
// be used. The default is the Unicode replacement character, U+FFFD
// (decimal 65533).
// If not specified, defaults to 65533
func UnicodeDecodeWithOffsetsReplacementChar(value int64) UnicodeDecodeWithOffsetsAttr {
	return func(m optionalAttr) {
		m["replacement_char"] = value
	}
}

// UnicodeDecodeWithOffsetsReplaceControlCharacters sets the optional replace_control_characters attribute to value.
//
// value: Whether to replace the C0 control characters (00-1F) with the
// `replacement_char`. Default is false.
// If not specified, defaults to false
func UnicodeDecodeWithOffsetsReplaceControlCharacters(value bool) UnicodeDecodeWithOffsetsAttr {
	return func(m optionalAttr) {
		m["replace_control_characters"] = value
	}
}

// Decodes each string in `input` into a sequence of Unicode code points.
//
// The character codepoints for all strings are returned using a single vector
// `char_values`, with strings expanded to characters in row-major order.
// Similarly, the character start byte offsets are returned using a single vector
// `char_to_byte_starts`, with strings expanded in row-major order.
//
// The `row_splits` tensor indicates where the codepoints and start offsets for
// each input string begin and end within the `char_values` and
// `char_to_byte_starts` tensors.  In particular, the values for the `i`th
// string (in row-major order) are stored in the slice
// `[row_splits[i]:row_splits[i+1]]`. Thus:
//
// * `char_values[row_splits[i]+j]` is the Unicode codepoint for the `j`th
//   character in the `i`th string (in row-major order).
// * `char_to_byte_starts[row_splits[i]+j]` is the start byte offset for the `j`th
//   character in the `i`th string (in row-major order).
// * `row_splits[i+1] - row_splits[i]` is the number of characters in the `i`th
//   string (in row-major order).
//
// Arguments:
//	input: The text to be decoded. Can have any shape. Note that the output is flattened
// to a vector of char values.
//	input_encoding: Text encoding of the input strings. This is any of the encodings supported
// by ICU ucnv algorithmic converters. Examples: `"UTF-16", "US ASCII", "UTF-8"`.
//
// Returns:
//	row_splits: A 1D int32 tensor containing the row splits.
//	char_values: A 1D int32 Tensor containing the decoded codepoints.
//	char_to_byte_starts: A 1D int32 Tensor containing the byte index in the input
// string where each character in `char_values` starts.
func UnicodeDecodeWithOffsets(scope *Scope, input tf.Output, input_encoding string, optional ...UnicodeDecodeWithOffsetsAttr) (row_splits tf.Output, char_values tf.Output, char_to_byte_starts tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"input_encoding": input_encoding}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "UnicodeDecodeWithOffsets",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}

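// Editor's note: a short decoding sketch (assumptions as in the earlier
// examples). For "héllo" in UTF-8 the second codepoint occupies two bytes, so
// the start offsets skip byte 2.
//
// ```go
// s := op.NewScope()
// in := op.Const(s, []string{"héllo"})
// rowSplits, chars, starts := op.UnicodeDecodeWithOffsets(s, in, "UTF-8")
// // rowSplits -> [0 5], chars -> [104 233 108 108 111], starts -> [0 1 3 4 5]
// _, _, _ = rowSplits, chars, starts
// ```
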
// Returns x - y element-wise.
//
// *NOTE*: `Subtract` supports broadcasting. More about broadcasting
// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
func Sub(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Sub",
		Input: []tf.Input{
			x, y,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// LRNAttr is an optional argument to LRN.
type LRNAttr func(optionalAttr)

// LRNDepthRadius sets the optional depth_radius attribute to value.
//
// value: 0-D.  Half-width of the 1-D normalization window.
// If not specified, defaults to 5
func LRNDepthRadius(value int64) LRNAttr {
	return func(m optionalAttr) {
		m["depth_radius"] = value
	}
}

// LRNBias sets the optional bias attribute to value.
//
// value: An offset (usually positive to avoid dividing by 0).
// If not specified, defaults to 1
func LRNBias(value float32) LRNAttr {
	return func(m optionalAttr) {
		m["bias"] = value
	}
}

// LRNAlpha sets the optional alpha attribute to value.
//
// value: A scale factor, usually positive.
// If not specified, defaults to 1
func LRNAlpha(value float32) LRNAttr {
	return func(m optionalAttr) {
		m["alpha"] = value
	}
}

// LRNBeta sets the optional beta attribute to value.
//
// value: An exponent.
// If not specified, defaults to 0.5
func LRNBeta(value float32) LRNAttr {
	return func(m optionalAttr) {
		m["beta"] = value
	}
}

// Local Response Normalization.
//
// The 4-D `input` tensor is treated as a 3-D array of 1-D vectors (along the last
// dimension), and each vector is normalized independently.  Within a given vector,
// each component is divided by the weighted, squared sum of inputs within
// `depth_radius`.  In detail,
//
//     sqr_sum[a, b, c, d] =
//         sum(input[a, b, c, d - depth_radius : d + depth_radius + 1] ** 2)
//     output = input / (bias + alpha * sqr_sum) ** beta
//
// For details, see [Krizhevsky et al., ImageNet classification with deep
// convolutional neural networks (NIPS 2012)](http://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks).
//
// Arguments:
//	input: 4-D.
func LRN(scope *Scope, input tf.Output, optional ...LRNAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "LRN",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

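// Editor's note: a minimal construction sketch for LRN (assumptions as in the
// earlier examples). The input must be 4-D; the attribute values shown simply
// make the documented defaults explicit.
//
// ```go
// s := op.NewScope()
// x := op.Const(s, [][][][]float32{{{{1, 2, 3, 4, 5, 6}}}}) // shape [1, 1, 1, 6]
// y := op.LRN(s, x,
// 	op.LRNDepthRadius(5), op.LRNBias(1), op.LRNAlpha(1), op.LRNBeta(0.5))
// _ = y // each depth component is divided by (bias + alpha*sqr_sum)**beta
// ```
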
// RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebugAttr is an optional argument to RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebug.
type RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebugAttr func(optionalAttr)

// RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebugTableId sets the optional table_id attribute to value.
// If not specified, defaults to -1
//
// REQUIRES: value >= -1
func RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebugTableId(value int64) RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebugAttr {
	return func(m optionalAttr) {
		m["table_id"] = value
	}
}

// RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebugTableName sets the optional table_name attribute to value.
// If not specified, defaults to ""
func RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebugTableName(value string) RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebugAttr {
	return func(m optionalAttr) {
		m["table_name"] = value
	}
}

// Retrieve proximal Adagrad embedding parameters with debug support.
//
// An op that retrieves optimization parameters from embedding to host
// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
// the correct embedding table configuration. For example, this op is
// used to retrieve updated parameters before saving a checkpoint.
//
// Returns:
//	parameters: Parameters updated by the proximal Adagrad optimization algorithm.
//	accumulators: Accumulators updated by the proximal Adagrad optimization algorithm.
//	gradient_accumulators: Gradient accumulators updated by the proximal Adagrad
// optimization algorithm.
func RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebug(scope *Scope, num_shards int64, shard_id int64, optional ...RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebugAttr) (parameters tf.Output, accumulators tf.Output, gradient_accumulators tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebug",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}

// ResourceSparseApplyAdagradAttr is an optional argument to ResourceSparseApplyAdagrad.
type ResourceSparseApplyAdagradAttr func(optionalAttr)

// ResourceSparseApplyAdagradUseLocking sets the optional use_locking attribute to value.
//
// value: If `True`, updating of the var and accum tensors will be protected
// by a lock; otherwise the behavior is undefined, but may exhibit less
// contention.
// If not specified, defaults to false
func ResourceSparseApplyAdagradUseLocking(value bool) ResourceSparseApplyAdagradAttr {
	return func(m optionalAttr) {
		m["use_locking"] = value
	}
}

// ResourceSparseApplyAdagradUpdateSlots sets the optional update_slots attribute to value.
// If not specified, defaults to true
func ResourceSparseApplyAdagradUpdateSlots(value bool) ResourceSparseApplyAdagradAttr {
	return func(m optionalAttr) {
		m["update_slots"] = value
	}
}

// Update relevant entries in '*var' and '*accum' according to the adagrad scheme.
//
// That is, for rows we have grad for, we update var and accum as follows:
// accum += grad * grad
// var -= lr * grad * (1 / sqrt(accum))
//
// Arguments:
//	var_: Should be from a Variable().
//	accum: Should be from a Variable().
//	lr: Learning rate. Must be a scalar.
//	grad: The gradient.
//	indices: A vector of indices into the first dimension of var and accum.
//
// Returns the created operation.
func ResourceSparseApplyAdagrad(scope *Scope, var_ tf.Output, accum tf.Output, lr tf.Output, grad tf.Output, indices tf.Output, optional ...ResourceSparseApplyAdagradAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ResourceSparseApplyAdagrad",
		Input: []tf.Input{
			var_, accum, lr, grad, indices,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}

// LoadTPUEmbeddingMomentumParametersAttr is an optional argument to LoadTPUEmbeddingMomentumParameters.
type LoadTPUEmbeddingMomentumParametersAttr func(optionalAttr)

// LoadTPUEmbeddingMomentumParametersTableId sets the optional table_id attribute to value.
// If not specified, defaults to -1
//
// REQUIRES: value >= -1
func LoadTPUEmbeddingMomentumParametersTableId(value int64) LoadTPUEmbeddingMomentumParametersAttr {
	return func(m optionalAttr) {
		m["table_id"] = value
	}
}

// LoadTPUEmbeddingMomentumParametersTableName sets the optional table_name attribute to value.
// If not specified, defaults to ""
func LoadTPUEmbeddingMomentumParametersTableName(value string) LoadTPUEmbeddingMomentumParametersAttr {
	return func(m optionalAttr) {
		m["table_name"] = value
	}
}

// Load Momentum embedding parameters.
//
// An op that loads optimization parameters into HBM for embedding. Must be
// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
// embedding table configuration. For example, this op is used to install
// parameters that are loaded from a checkpoint before a training loop is
// executed.
//
// Arguments:
//	parameters: Value of parameters used in the Momentum optimization algorithm.
//	momenta: Value of momenta used in the Momentum optimization algorithm.
//
//
//
// Returns the created operation.
func LoadTPUEmbeddingMomentumParameters(scope *Scope, parameters tf.Output, momenta tf.Output, num_shards int64, shard_id int64, optional ...LoadTPUEmbeddingMomentumParametersAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "LoadTPUEmbeddingMomentumParameters",
		Input: []tf.Input{
			parameters, momenta,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}

// Assigns sparse updates to the variable referenced by `resource`.
//
// This operation computes
//
//     # Scalar indices
//     ref[indices, ...] = updates[...]
//
//     # Vector indices (for each i)
//     ref[indices[i], ...] = updates[i, ...]
//
//     # High rank indices (for each i, ..., j)
//     ref[indices[i, ..., j], ...] = updates[i, ..., j, ...]
//
// Arguments:
//	resource: Should be from a `Variable` node.
//	indices: A tensor of indices into the first dimension of `ref`.
//	updates: A tensor of updated values to add to `ref`.
//
// Returns the created operation.
func ResourceScatterUpdate(scope *Scope, resource tf.Output, indices tf.Output, updates tf.Output) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "ResourceScatterUpdate",
		Input: []tf.Input{
			resource, indices, updates,
		},
	}
	return scope.AddOperation(opspec)
}

// Elementwise computes the bitwise right-shift of `x` and `y`.
//
// Performs a logical shift for unsigned integer types, and an arithmetic shift
// for signed integer types.
//
// If `y` is negative, or greater than or equal to the width of `x` in bits,
// the result is implementation defined.
func RightShift(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "RightShift",
		Input: []tf.Input{
			x, y,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// TensorListStackAttr is an optional argument to TensorListStack.
type TensorListStackAttr func(optionalAttr)

// TensorListStackNumElements sets the optional num_elements attribute to value.
// If not specified, defaults to -1
func TensorListStackNumElements(value int64) TensorListStackAttr {
	return func(m optionalAttr) {
		m["num_elements"] = value
	}
}

// Stacks all tensors in the list.
//
// Requires that all tensors have the same shape.
//
// input_handle: the input list
// tensor: the gathered result
// num_elements: optional. If not -1, the number of elements in the list.
//
func TensorListStack(scope *Scope, input_handle tf.Output, element_shape tf.Output, element_dtype tf.DataType, optional ...TensorListStackAttr) (tensor tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"element_dtype": element_dtype}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "TensorListStack",
		Input: []tf.Input{
			input_handle, element_shape,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// A placeholder op for a value that will be fed into the computation.
//
// Arguments:
//	dtype: The type of elements in the tensor.
//	shape: The shape of the tensor.
//
// Returns A tensor that will be provided using the infeed mechanism.
func InfeedDequeue(scope *Scope, dtype tf.DataType, shape tf.Shape) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"dtype": dtype, "shape": shape}
	opspec := tf.OpSpec{
		Type: "InfeedDequeue",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// StatelessRandomUniformAttr is an optional argument to StatelessRandomUniform.
type StatelessRandomUniformAttr func(optionalAttr)

// StatelessRandomUniformDtype sets the optional dtype attribute to value.
//
// value: The type of the output.
// If not specified, defaults to DT_FLOAT
func StatelessRandomUniformDtype(value tf.DataType) StatelessRandomUniformAttr {
	return func(m optionalAttr) {
		m["dtype"] = value
	}
}

// Outputs deterministic pseudorandom values from a uniform distribution.
//
// The generated values follow a uniform distribution in the range `[0, 1)`. The
// lower bound 0 is included in the range, while the upper bound 1 is excluded.
//
// The outputs are a deterministic function of `shape` and `seed`.
//
// Arguments:
//	shape: The shape of the output tensor.
//	seed: 2 seeds (shape [2]).
//
// Returns Random values with specified shape.
func StatelessRandomUniform(scope *Scope, shape tf.Output, seed tf.Output, optional ...StatelessRandomUniformAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "StatelessRandomUniform",
		Input: []tf.Input{
			shape, seed,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

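// Editor's note: a determinism sketch for the wrapper above (assumptions as
// in the earlier examples). Rebuilding the graph with the same shape and seed
// tensors reproduces the same values.
//
// ```go
// s := op.NewScope()
// shape := op.Const(s, []int64{2, 3})
// seed := op.Const(s, []int64{42, 7}) // 2 seeds, shape [2]
// r := op.StatelessRandomUniform(s, shape, seed) // float32 values in [0, 1)
// _ = r
// ```
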
// Makes its input available to the next iteration.
//
// Arguments:
//	data: The tensor to be made available to the next iteration.
//
// Returns The same tensor as `data`.
func NextIteration(scope *Scope, data tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "NextIteration",
		Input: []tf.Input{
			data,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Output a fact about factorials.
func Fact(scope *Scope) (fact tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Fact",
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Returns the truth value of x AND y element-wise.
//
// *NOTE*: `LogicalAnd` supports broadcasting. More about broadcasting
// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
func LogicalAnd(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "LogicalAnd",
		Input: []tf.Input{
			x, y,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// GenerateVocabRemappingAttr is an optional argument to GenerateVocabRemapping.
type GenerateVocabRemappingAttr func(optionalAttr)

// GenerateVocabRemappingOldVocabSize sets the optional old_vocab_size attribute to value.
//
// value: Number of entries in the old vocab file to consider.  If -1,
// use the entire old vocabulary.
// If not specified, defaults to -1
//
// REQUIRES: value >= -1
func GenerateVocabRemappingOldVocabSize(value int64) GenerateVocabRemappingAttr {
	return func(m optionalAttr) {
		m["old_vocab_size"] = value
	}
}

// Given a path to new and old vocabulary files, returns a remapping Tensor of
// length `num_new_vocab`, where `remapping[i]` contains the row number in the old
// vocabulary that corresponds to row `i` in the new vocabulary (starting at line
// `new_vocab_offset` and up to `num_new_vocab` entities), or `-1` if entry `i`
// in the new vocabulary is not in the old vocabulary.  The old vocabulary is
// constrained to the first `old_vocab_size` entries if `old_vocab_size` is not the
// default value of -1.
//
// `new_vocab_offset` enables
// use in the partitioned variable case, and should generally be set through
// examining partitioning info.  The files should be text files, with each line
// containing a single entity within the vocabulary.
//
// For example, with `new_vocab_file` a text file containing each of the following
// elements on a single line: `[f0, f1, f2, f3]`, `old_vocab_file` containing
// `[f1, f0, f3]`, and `num_new_vocab = 3, new_vocab_offset = 1`, the returned
// remapping would be `[0, -1, 2]`.
//
// The op also returns a count of how many entries in the new vocabulary
// were present in the old vocabulary, which is used to calculate the number of
// values to initialize in a weight matrix remapping.
//
// This functionality can be used to remap both row vocabularies (typically,
// features) and column vocabularies (typically, classes) from TensorFlow
// checkpoints.  Note that the partitioning logic relies on contiguous vocabularies
// corresponding to div-partitioned variables.  Moreover, the underlying remapping
// uses an IndexTable (as opposed to an inexact CuckooTable), so client code should
// use the corresponding index_table_from_file() as the FeatureColumn framework
// does (as opposed to tf.feature_to_id(), which uses a CuckooTable).
//
// Arguments:
//	new_vocab_file: Path to the new vocab file.
//	old_vocab_file: Path to the old vocab file.
//	new_vocab_offset: How many entries into the new vocab file to start reading.
//	num_new_vocab: Number of entries in the new vocab file to remap.
//
// Returns:
//	remapping: A Tensor of length num_new_vocab where the element at index i is
// equal to the old ID that maps to the new ID i.  This element is -1 for any
// new ID that is not found in the old vocabulary.
//	num_present: Number of new vocab entries found in the old vocab.
func GenerateVocabRemapping(scope *Scope, new_vocab_file tf.Output, old_vocab_file tf.Output, new_vocab_offset int64, num_new_vocab int64, optional ...GenerateVocabRemappingAttr) (remapping tf.Output, num_present tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"new_vocab_offset": new_vocab_offset, "num_new_vocab": num_new_vocab}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "GenerateVocabRemapping",
		Input: []tf.Input{
			new_vocab_file, old_vocab_file,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1)
}

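// Editor's note: a hedged sketch of the remapping example from the comment
// above (assumptions as in the earlier examples). The vocab file paths are
// hypothetical.
//
// ```go
// s := op.NewScope()
// newVocab := op.Const(s, "new_vocab.txt") // contains f0, f1, f2, f3
// oldVocab := op.Const(s, "old_vocab.txt") // contains f1, f0, f3
// remapping, numPresent := op.GenerateVocabRemapping(s, newVocab, oldVocab, 1, 3)
// // Fetching remapping yields [0, -1, 2]; numPresent yields 2.
// _, _ = remapping, numPresent
// ```
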
// Says whether the targets are in the top `K` predictions.
//
// This outputs a `batch_size` bool array; an entry `out[i]` is `true` if the
// prediction for the target class is among the top `k` predictions among
// all predictions for example `i`. Note that the behavior of `InTopK` differs
// from the `TopK` op in its handling of ties; if multiple classes have the
// same prediction value and straddle the top-`k` boundary, all of those
// classes are considered to be in the top `k`.
//
// More formally, let
//
//   \\(predictions_i\\) be the predictions for all classes for example `i`,
//   \\(targets_i\\) be the target class for example `i`,
//   \\(out_i\\) be the output for example `i`,
//
// $$out_i = predictions_{i, targets_i} \in TopKIncludingTies(predictions_i)$$
//
// Arguments:
//	predictions: A `batch_size` x `classes` tensor.
//	targets: A `batch_size` vector of class ids.
//	k: Number of top elements to look at for computing precision.
//
// Returns Computed Precision at `k` as a `bool Tensor`.
func InTopK(scope *Scope, predictions tf.Output, targets tf.Output, k int64) (precision tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"k": k}
	opspec := tf.OpSpec{
		Type: "InTopK",
		Input: []tf.Input{
			predictions, targets,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

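// Editor's note: a small worked sketch (assumptions as in the earlier
// examples). With k = 2 the top-2 classes of the single example are 3 and 1,
// so target class 2 is not in the top k.
//
// ```go
// s := op.NewScope()
// preds := op.Const(s, [][]float32{{0.1, 0.3, 0.2, 0.4}}) // batch_size = 1
// targets := op.Const(s, []int32{2})
// ok := op.InTopK(s, preds, targets, 2) // fetches to [false]
// _ = ok
// ```
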
// RetrieveTPUEmbeddingAdagradParametersGradAccumDebugAttr is an optional argument to RetrieveTPUEmbeddingAdagradParametersGradAccumDebug.
type RetrieveTPUEmbeddingAdagradParametersGradAccumDebugAttr func(optionalAttr)

// RetrieveTPUEmbeddingAdagradParametersGradAccumDebugTableId sets the optional table_id attribute to value.
// If not specified, defaults to -1
//
// REQUIRES: value >= -1
func RetrieveTPUEmbeddingAdagradParametersGradAccumDebugTableId(value int64) RetrieveTPUEmbeddingAdagradParametersGradAccumDebugAttr {
	return func(m optionalAttr) {
		m["table_id"] = value
	}
}

// RetrieveTPUEmbeddingAdagradParametersGradAccumDebugTableName sets the optional table_name attribute to value.
// If not specified, defaults to ""
func RetrieveTPUEmbeddingAdagradParametersGradAccumDebugTableName(value string) RetrieveTPUEmbeddingAdagradParametersGradAccumDebugAttr {
	return func(m optionalAttr) {
		m["table_name"] = value
	}
}

// Retrieve Adagrad embedding parameters with debug support.
//
// An op that retrieves optimization parameters from embedding to host
// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
// the correct embedding table configuration. For example, this op is
// used to retrieve updated parameters before saving a checkpoint.
//
// Returns:
//	parameters: Parameters updated by the Adagrad optimization algorithm.
//	accumulators: Accumulators updated by the Adagrad optimization algorithm.
//	gradient_accumulators: Gradient accumulators updated by the Adagrad
// optimization algorithm.
func RetrieveTPUEmbeddingAdagradParametersGradAccumDebug(scope *Scope, num_shards int64, shard_id int64, optional ...RetrieveTPUEmbeddingAdagradParametersGradAccumDebugAttr) (parameters tf.Output, accumulators tf.Output, gradient_accumulators tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "RetrieveTPUEmbeddingAdagradParametersGradAccumDebug",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}

// This op consumes a lock created by `MutexLock`.
//
// This op exists to consume a tensor created by `MutexLock` (other than
// direct control dependencies).  It should be the only op that consumes the
// tensor, and will raise an error if it is not.  Its only purpose is to keep
// the mutex lock tensor alive until it is consumed by this op.
//
// **NOTE**: This operation must run on the same device as its input.  This may
// be enforced via the `colocate_with` mechanism.
//
// Arguments:
//	mutex_lock: A tensor returned by `MutexLock`.
//
// Returns the created operation.
func ConsumeMutexLock(scope *Scope, mutex_lock tf.Output) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "ConsumeMutexLock",
		Input: []tf.Input{
			mutex_lock,
		},
	}
	return scope.AddOperation(opspec)
}

// ResourceScatterNdAddAttr is an optional argument to ResourceScatterNdAdd.
type ResourceScatterNdAddAttr func(optionalAttr)

// ResourceScatterNdAddUseLocking sets the optional use_locking attribute to value.
//
// value: An optional bool. Defaults to True. If True, the assignment will
// be protected by a lock; otherwise the behavior is undefined,
// but may exhibit less contention.
// If not specified, defaults to true
func ResourceScatterNdAddUseLocking(value bool) ResourceScatterNdAddAttr {
	return func(m optionalAttr) {
		m["use_locking"] = value
	}
}

// Applies sparse addition to individual values or slices in a Variable.
//
// `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
//
// `indices` must be an integer tensor, containing indices into `ref`.
// It must have shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
//
// The innermost dimension of `indices` (with length `K`) corresponds to
// indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
// dimension of `ref`.
//
// `updates` is a `Tensor` of rank `Q-1+P-K` with shape:
//
// ```
// [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]
// ```
//
// For example, say we want to add 4 scattered elements to a rank-1 tensor
// with 8 elements. In Python, that addition would look like this:
//
// ```python
// ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8], use_resource=True)
// indices = tf.constant([[4], [3], [1], [7]])
// updates = tf.constant([9, 10, 11, 12])
// add = tf.scatter_nd_add(ref, indices, updates)
// with tf.Session() as sess:
//   print(sess.run(add))
// ```
//
// The resulting update to ref would look like this:
//
//     [1, 13, 3, 14, 14, 6, 7, 20]
//
// See `tf.scatter_nd` for more details about how to make updates to
// slices.
//
// Arguments:
//	ref: A resource handle. Must be from a VarHandleOp.
//	indices: A Tensor. Must be one of the following types: int32, int64.
// A tensor of indices into ref.
//	updates: A Tensor. Must have the same type as ref. A tensor of
// values to add to ref.
//
// Returns the created operation.
func ResourceScatterNdAdd(scope *Scope, ref tf.Output, indices tf.Output, updates tf.Output, optional ...ResourceScatterNdAddAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ResourceScatterNdAdd",
		Input: []tf.Input{
			ref, indices, updates,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}

// Replaces the contents of the table with the specified keys and values.
//
// The tensor `keys` must be of the same type as the keys of the table.
// The tensor `values` must be of the type of the table values.
//
// Arguments:
//	table_handle: Handle to the table.
//	keys: Any shape.  Keys to look up.
//	values: Values to associate with keys.
//
// Returns the created operation.
func LookupTableImportV2(scope *Scope, table_handle tf.Output, keys tf.Output, values tf.Output) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "LookupTableImportV2",
		Input: []tf.Input{
			table_handle, keys, values,
		},
	}
	return scope.AddOperation(opspec)
}

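// Editor's note: a hedged end-to-end table sketch combining the import and
// find wrappers above. It assumes the HashTableV2 wrapper defined elsewhere
// in this package; the keys and values are illustrative.
//
// ```go
// s := op.NewScope()
// table := op.HashTableV2(s, tf.String, tf.Int64)
// initOp := op.LookupTableImportV2(s, table,
// 	op.Const(s, []string{"a", "b"}), op.Const(s, []int64{1, 2}))
// found := op.LookupTableFindV2(s, table,
// 	op.Const(s, []string{"b", "x"}), op.Const(s, int64(-1)))
// // Run initOp as a session target first; fetching found then yields [2, -1].
// _, _ = initOp, found
// ```
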
19009// Worker heartbeat op.
19010//
19011// Heartbeats may be sent periodically to indicate the coordinator is still active,
19012// to retrieve the current worker status and to expedite shutdown when necessary.
19013//
19014// Arguments:
19015//	request: A string tensor containing a serialized WorkerHeartbeatRequest
19016//
19017// Returns A string tensor containing a serialized WorkerHeartbeatResponse
19018func WorkerHeartbeat(scope *Scope, request tf.Output) (response tf.Output) {
19019	if scope.Err() != nil {
19020		return
19021	}
19022	opspec := tf.OpSpec{
19023		Type: "WorkerHeartbeat",
19024		Input: []tf.Input{
19025			request,
19026		},
19027	}
19028	op := scope.AddOperation(opspec)
19029	return op.Output(0)
19030}
19031
19032// Returns the truth value of (x <= y) element-wise.
19033//
19034// *NOTE*: `LessEqual` supports broadcasting. More about broadcasting
19035// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
19036func LessEqual(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
19037	if scope.Err() != nil {
19038		return
19039	}
19040	opspec := tf.OpSpec{
19041		Type: "LessEqual",
19042		Input: []tf.Input{
19043			x, y,
19044		},
19045	}
19046	op := scope.AddOperation(opspec)
19047	return op.Output(0)
19048}
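
// exampleLessEqual is an editorial sketch, not generated code, showing the
// broadcasting behavior noted above: a scalar `y` is compared against every
// element of the vector `x`.
func exampleLessEqual() {
	s := NewScope()
	x := Const(s, []int64{1, 5, 10})
	y := Const(s, int64(5))
	z := LessEqual(s, x, y) // evaluates to [true, true, false]
	_ = z
}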
19049
19050// EnqueueTPUEmbeddingIntegerBatchAttr is an optional argument to EnqueueTPUEmbeddingIntegerBatch.
19051type EnqueueTPUEmbeddingIntegerBatchAttr func(optionalAttr)
19052
19053// EnqueueTPUEmbeddingIntegerBatchDeviceOrdinal sets the optional device_ordinal attribute to value.
19054//
19055// value: The TPU device to use. Should be >= 0 and less than the number
19056// of TPU cores in the task on which the node is placed.
19057// If not specified, defaults to -1
19058func EnqueueTPUEmbeddingIntegerBatchDeviceOrdinal(value int64) EnqueueTPUEmbeddingIntegerBatchAttr {
19059	return func(m optionalAttr) {
19060		m["device_ordinal"] = value
19061	}
19062}
19063
19064// An op that enqueues a list of input batch tensors to TPUEmbedding.
19065//
19066// Arguments:
19067//	batch: A list of 1D tensors, one for each embedding table, containing the
19068// indices into the tables.
19069//	mode_override: A string input that overrides the mode specified in the
19070// TPUEmbeddingConfiguration. Supported values are {'unspecified', 'inference',
19071// 'training', 'backward_pass_only'}. When set to 'unspecified', the mode set
19072// in TPUEmbeddingConfiguration is used, otherwise mode_override is used.
19073//
19074// Returns the created operation.
19075func EnqueueTPUEmbeddingIntegerBatch(scope *Scope, batch []tf.Output, mode_override tf.Output, optional ...EnqueueTPUEmbeddingIntegerBatchAttr) (o *tf.Operation) {
19076	if scope.Err() != nil {
19077		return
19078	}
19079	attrs := map[string]interface{}{}
19080	for _, a := range optional {
19081		a(attrs)
19082	}
19083	opspec := tf.OpSpec{
19084		Type: "EnqueueTPUEmbeddingIntegerBatch",
19085		Input: []tf.Input{
19086			tf.OutputList(batch), mode_override,
19087		},
19088		Attrs: attrs,
19089	}
19090	return scope.AddOperation(opspec)
19091}
19092
19093// An op that receives embedding activations on the TPU.
19094//
19095// The TPU system performs the embedding lookups and aggregations specified by
19096// the arguments to TPUEmbeddingEnqueue(Integer/Sparse/SparseTensor)Batch. The
19097// results of these aggregations are visible to the TensorFlow graph as the
19098// outputs of a RecvTPUEmbeddingActivations op. This op returns a list containing
19099// one Tensor of activations per table specified in the model. There can be at
19100// most one RecvTPUEmbeddingActivations op in the TPU graph.
19101//
19102// Arguments:
19103//	num_outputs: The number of output activation tensors, equal to the number of
19104// embedding tables in the model.
19105//	config: Serialized TPUEmbeddingConfiguration proto.
19106//
19107// Returns A TensorList of embedding activations containing one Tensor per
19108// embedding table in the model.
19109func RecvTPUEmbeddingActivations(scope *Scope, num_outputs int64, config string) (outputs []tf.Output) {
19110	if scope.Err() != nil {
19111		return
19112	}
19113	attrs := map[string]interface{}{"num_outputs": num_outputs, "config": config}
19114	opspec := tf.OpSpec{
19115		Type: "RecvTPUEmbeddingActivations",
19116
19117		Attrs: attrs,
19118	}
19119	op := scope.AddOperation(opspec)
19120	if scope.Err() != nil {
19121		return
19122	}
19123	var idx int
19124	var err error
19125	if outputs, idx, err = makeOutputList(op, idx, "outputs"); err != nil {
19126		scope.UpdateErr("RecvTPUEmbeddingActivations", err)
19127		return
19128	}
19129	return outputs
19130}
19131
19132// Selects elements from `x` or `y`, depending on `condition`.
19133//
19134// The `x` and `y` tensors must both have the same shape, and the
19135// output will also have that shape.
19136//
19137// The `condition` tensor must be a scalar if `x` and `y` are scalars.
19138// If `x` and `y` are vectors or higher rank, then `condition` must be either a
19139// scalar, a vector with size matching the first dimension of `x`, or must have
19140// the same shape as `x`.
19141//
19142// The `condition` tensor acts as a mask that chooses, based on the value at each
19143// element, whether the corresponding element / row in the output should be
19144// taken from `x` (if true) or `y` (if false).
19145//
19146// If `condition` is a vector and `x` and `y` are higher rank matrices, then
19147// it chooses which row (outer dimension) to copy from `x` and `y`.
19148// If `condition` has the same shape as `x` and `y`, then it chooses which
19149// element to copy from `x` and `y`.
19150//
19151// For example:
19152//
19153// ```python
19154// # 'condition' tensor is [[True,  False]
19155// #                        [False, True]]
19156// # 't' is [[1, 2],
19157// #         [3, 4]]
19158// # 'e' is [[5, 6],
19159// #         [7, 8]]
19160// select(condition, t, e)  # => [[1, 6], [7, 4]]
19161//
19162//
19163// # 'condition' tensor is [True, False]
19164// # 't' is [[1, 2],
19165// #         [3, 4]]
19166// # 'e' is [[5, 6],
19167// #         [7, 8]]
19168// select(condition, t, e) ==> [[1, 2],
19169//                              [7, 8]]
19170//
19171// ```
19172//
19173// Arguments:
19174//
19175//	x: A `Tensor` which may have the same shape as `condition`.
19176// If `condition` is rank 1, `x` may have higher rank,
19177// but its first dimension must match the size of `condition`.
19178//	y: A `Tensor` with the same type and shape as `x`.
19179//
19180// Returns A `Tensor` with the same type and shape as `x` and `y`.
19181func Select(scope *Scope, condition tf.Output, x tf.Output, y tf.Output) (output tf.Output) {
19182	if scope.Err() != nil {
19183		return
19184	}
19185	opspec := tf.OpSpec{
19186		Type: "Select",
19187		Input: []tf.Input{
19188			condition, x, y,
19189		},
19190	}
19191	op := scope.AddOperation(opspec)
19192	return op.Output(0)
19193}
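
// exampleSelect is an editorial sketch, not generated code: it is the Go
// counterpart of the second Python example above, where a rank-1 `condition`
// picks whole rows from `t` or `e`.
func exampleSelect() {
	s := NewScope()
	condition := Const(s, []bool{true, false})
	t := Const(s, [][]int32{{1, 2}, {3, 4}})
	e := Const(s, [][]int32{{5, 6}, {7, 8}})
	out := Select(s, condition, t, e) // evaluates to [[1, 2], [7, 8]]
	_ = out
}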
19194
19195// Returns the set of files matching one or more glob patterns.
19196//
19197// Note that this routine only supports wildcard characters in the
19198// basename portion of the pattern, not in the directory portion.
19199// Note also that the order of filenames returned can be non-deterministic.
19200//
19201// Arguments:
19202//	pattern: Shell wildcard pattern(s). Scalar or vector of type string.
19203//
19204// Returns A vector of matching filenames.
19205func MatchingFiles(scope *Scope, pattern tf.Output) (filenames tf.Output) {
19206	if scope.Err() != nil {
19207		return
19208	}
19209	opspec := tf.OpSpec{
19210		Type: "MatchingFiles",
19211		Input: []tf.Input{
19212			pattern,
19213		},
19214	}
19215	op := scope.AddOperation(opspec)
19216	return op.Output(0)
19217}
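
// exampleMatchingFiles is an editorial sketch, not generated code; the glob
// pattern "/tmp/data/*.txt" is purely hypothetical. Wildcards are only
// honored in the basename portion, per the note above.
func exampleMatchingFiles() {
	s := NewScope()
	pattern := Const(s, "/tmp/data/*.txt")
	filenames := MatchingFiles(s, pattern) // a string vector, order unspecified
	_ = filenames
}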
19218
19219// SqueezeAttr is an optional argument to Squeeze.
19220type SqueezeAttr func(optionalAttr)
19221
19222// SqueezeAxis sets the optional axis attribute to value.
19223//
19224// value: If specified, only squeezes the dimensions listed. The dimension
19225// index starts at 0. It is an error to squeeze a dimension that is not 1. Must
19226// be in the range `[-rank(input), rank(input))`.
19227// If not specified, defaults to <>
19228//
19229// REQUIRES: len(value) >= 0
19230func SqueezeAxis(value []int64) SqueezeAttr {
19231	return func(m optionalAttr) {
19232		m["squeeze_dims"] = value
19233	}
19234}
19235
19236// Removes dimensions of size 1 from the shape of a tensor.
19237//
19238// Given a tensor `input`, this operation returns a tensor of the same type with
19239// all dimensions of size 1 removed. If you don't want to remove all size 1
19240// dimensions, you can remove specific size 1 dimensions by specifying
19241// `axis`.
19242//
19243// For example:
19244//
19245// ```
19246// # 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
19247// shape(squeeze(t)) ==> [2, 3]
19248// ```
19249//
19250// Or, to remove specific size 1 dimensions:
19251//
19252// ```
19253// # 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
19254// shape(squeeze(t, [2, 4])) ==> [1, 2, 3, 1]
19255// ```
19256//
19257// Arguments:
19258//	input: The `input` to squeeze.
19259//
19260// Returns Contains the same data as `input`, but has one or more dimensions of
19261// size 1 removed.
19262func Squeeze(scope *Scope, input tf.Output, optional ...SqueezeAttr) (output tf.Output) {
19263	if scope.Err() != nil {
19264		return
19265	}
19266	attrs := map[string]interface{}{}
19267	for _, a := range optional {
19268		a(attrs)
19269	}
19270	opspec := tf.OpSpec{
19271		Type: "Squeeze",
19272		Input: []tf.Input{
19273			input,
19274		},
19275		Attrs: attrs,
19276	}
19277	op := scope.AddOperation(opspec)
19278	return op.Output(0)
19279}
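
// exampleSqueeze is an editorial sketch, not generated code, mirroring the
// two doc examples above. It assumes the Placeholder and PlaceholderShape
// wrappers defined elsewhere in this package.
func exampleSqueeze() {
	s := NewScope()
	// 't' has shape [1, 2, 1, 3, 1, 1].
	t := Placeholder(s, tf.Float, PlaceholderShape(tf.MakeShape(1, 2, 1, 3, 1, 1)))
	all := Squeeze(s, t)                              // shape [2, 3]
	some := Squeeze(s, t, SqueezeAxis([]int64{2, 4})) // shape [1, 2, 3, 1]
	_, _ = all, some
}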
19280
19281// ResourceApplyAdadeltaAttr is an optional argument to ResourceApplyAdadelta.
19282type ResourceApplyAdadeltaAttr func(optionalAttr)
19283
19284// ResourceApplyAdadeltaUseLocking sets the optional use_locking attribute to value.
19285//
19286// value: If True, updating of the var, accum and update_accum tensors will be protected by
19287// a lock; otherwise the behavior is undefined, but may exhibit less contention.
19288// If not specified, defaults to false
19289func ResourceApplyAdadeltaUseLocking(value bool) ResourceApplyAdadeltaAttr {
19290	return func(m optionalAttr) {
19291		m["use_locking"] = value
19292	}
19293}
19294
19295// Update '*var' according to the adadelta scheme.
19296//
19297// accum = rho * accum + (1 - rho) * grad^2
19298// update = sqrt(update_accum + epsilon) / sqrt(accum + epsilon) * grad
19299// update_accum = rho * update_accum + (1 - rho) * update^2
19300// var -= update
19301//
19302// Arguments:
19303//	var_: Should be from a Variable().
19304//	accum: Should be from a Variable().
19305//	accum_update: Should be from a Variable().
19306//	lr: Scaling factor. Must be a scalar.
19307//	rho: Decay factor. Must be a scalar.
19308//	epsilon: Constant factor. Must be a scalar.
19309//	grad: The gradient.
19310//
19311// Returns the created operation.
19312func ResourceApplyAdadelta(scope *Scope, var_ tf.Output, accum tf.Output, accum_update tf.Output, lr tf.Output, rho tf.Output, epsilon tf.Output, grad tf.Output, optional ...ResourceApplyAdadeltaAttr) (o *tf.Operation) {
19313	if scope.Err() != nil {
19314		return
19315	}
19316	attrs := map[string]interface{}{}
19317	for _, a := range optional {
19318		a(attrs)
19319	}
19320	opspec := tf.OpSpec{
19321		Type: "ResourceApplyAdadelta",
19322		Input: []tf.Input{
19323			var_, accum, accum_update, lr, rho, epsilon, grad,
19324		},
19325		Attrs: attrs,
19326	}
19327	return scope.AddOperation(opspec)
19328}
19329
19330// NonMaxSuppressionAttr is an optional argument to NonMaxSuppression.
19331type NonMaxSuppressionAttr func(optionalAttr)
19332
19333// NonMaxSuppressionIouThreshold sets the optional iou_threshold attribute to value.
19334//
19335// value: A float representing the threshold for deciding whether boxes
19336// overlap too much with respect to IOU.
19337// If not specified, defaults to 0.5
19338func NonMaxSuppressionIouThreshold(value float32) NonMaxSuppressionAttr {
19339	return func(m optionalAttr) {
19340		m["iou_threshold"] = value
19341	}
19342}
19343
19344// Greedily selects a subset of bounding boxes in descending order of score,
19345//
19346// pruning away boxes that have high intersection-over-union (IOU) overlap
19347// with previously selected boxes.  Bounding boxes are supplied as
19348// [y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any
19349// diagonal pair of box corners and the coordinates can be provided as normalized
19350// (i.e., lying in the interval [0, 1]) or absolute.  Note that this algorithm
19351// is agnostic to where the origin is in the coordinate system.  Note that this
19352// algorithm is invariant to orthogonal transformations and translations
19353// of the coordinate system; thus translating or reflections of the coordinate
19354// system result in the same boxes being selected by the algorithm.
19355// The output of this operation is a set of integers indexing into the input
19356// collection of bounding boxes representing the selected boxes.  The bounding
19357// box coordinates corresponding to the selected indices can then be obtained
19358// using the `tf.gather` operation.  For example:
19359//   selected_indices = tf.image.non_max_suppression(
19360//       boxes, scores, max_output_size, iou_threshold)
19361//   selected_boxes = tf.gather(boxes, selected_indices)
19362//
19363// Arguments:
19364//	boxes: A 2-D float tensor of shape `[num_boxes, 4]`.
19365//	scores: A 1-D float tensor of shape `[num_boxes]` representing a single
19366// score corresponding to each box (each row of boxes).
19367//	max_output_size: A scalar integer tensor representing the maximum number of
19368// boxes to be selected by non max suppression.
19369//
19370// Returns A 1-D integer tensor of shape `[M]` representing the selected
19371// indices from the boxes tensor, where `M <= max_output_size`.
19372func NonMaxSuppression(scope *Scope, boxes tf.Output, scores tf.Output, max_output_size tf.Output, optional ...NonMaxSuppressionAttr) (selected_indices tf.Output) {
19373	if scope.Err() != nil {
19374		return
19375	}
19376	attrs := map[string]interface{}{}
19377	for _, a := range optional {
19378		a(attrs)
19379	}
19380	opspec := tf.OpSpec{
19381		Type: "NonMaxSuppression",
19382		Input: []tf.Input{
19383			boxes, scores, max_output_size,
19384		},
19385		Attrs: attrs,
19386	}
19387	op := scope.AddOperation(opspec)
19388	return op.Output(0)
19389}
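
// exampleNonMaxSuppression is an editorial sketch, not generated code: a Go
// rendering of the pseudo-code above with illustrative box values. It assumes
// the Gather and Const wrappers defined elsewhere in this package.
func exampleNonMaxSuppression() {
	s := NewScope()
	boxes := Const(s, [][]float32{
		{0, 0, 1, 1},
		{0, 0.1, 1, 1.1},
	})
	scores := Const(s, []float32{0.9, 0.75})
	maxOutputSize := Const(s, int32(1))
	selected := NonMaxSuppression(s, boxes, scores, maxOutputSize,
		NonMaxSuppressionIouThreshold(0.5))
	selectedBoxes := Gather(s, boxes, selected)
	_ = selectedBoxes
}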
19390
19391// Creates a dataset that emits `components` as a tuple of tensors once.
19392func TensorDataset(scope *Scope, components []tf.Output, output_shapes []tf.Shape) (handle tf.Output) {
19393	if scope.Err() != nil {
19394		return
19395	}
19396	attrs := map[string]interface{}{"output_shapes": output_shapes}
19397	opspec := tf.OpSpec{
19398		Type: "TensorDataset",
19399		Input: []tf.Input{
19400			tf.OutputList(components),
19401		},
19402		Attrs: attrs,
19403	}
19404	op := scope.AddOperation(opspec)
19405	return op.Output(0)
19406}
19407
19408// 2D real-valued fast Fourier transform.
19409//
19410// Computes the 2-dimensional discrete Fourier transform of a real-valued signal
19411// over the inner-most 2 dimensions of `input`.
19412//
19413// Since the DFT of a real signal is Hermitian-symmetric, `RFFT2D` only returns the
19414// `fft_length / 2 + 1` unique components of the FFT for the inner-most dimension
19415// of `output`: the zero-frequency term, followed by the `fft_length / 2`
19416// positive-frequency terms.
19417//
19418// Along each axis `RFFT2D` is computed on, if `fft_length` is smaller than the
19419// corresponding dimension of `input`, the dimension is cropped. If it is larger,
19420// the dimension is padded with zeros.
19421//
19422// Arguments:
19423//	input: A float32 tensor.
19424//	fft_length: An int32 tensor of shape [2]. The FFT length for each dimension.
19425//
19426// Returns A complex64 tensor of the same rank as `input`. The inner-most 2
19427//   dimensions of `input` are replaced with their 2D Fourier transform. The
19428//   inner-most dimension contains `fft_length / 2 + 1` unique frequency
19429//   components.
19430//
19431// @compatibility(numpy)
19432// Equivalent to np.fft.rfft2
19433// @end_compatibility
19434func RFFT2D(scope *Scope, input tf.Output, fft_length tf.Output) (output tf.Output) {
19435	if scope.Err() != nil {
19436		return
19437	}
19438	opspec := tf.OpSpec{
19439		Type: "RFFT2D",
19440		Input: []tf.Input{
19441			input, fft_length,
19442		},
19443	}
19444	op := scope.AddOperation(opspec)
19445	return op.Output(0)
19446}
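
// exampleRFFT2D is an editorial sketch, not generated code. For a 4x4 float32
// input and fft_length [4, 4], the output is complex64 with shape [4, 3]:
// 4/2 + 1 = 3 unique components along the inner-most dimension. Placeholder
// and PlaceholderShape are assumed from elsewhere in this package.
func exampleRFFT2D() {
	s := NewScope()
	input := Placeholder(s, tf.Float, PlaceholderShape(tf.MakeShape(4, 4)))
	fftLength := Const(s, []int32{4, 4})
	output := RFFT2D(s, input, fftLength) // complex64, shape [4, 3]
	_ = output
}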
19447
19448// ResourceSparseApplyFtrlAttr is an optional argument to ResourceSparseApplyFtrl.
19449type ResourceSparseApplyFtrlAttr func(optionalAttr)
19450
19451// ResourceSparseApplyFtrlUseLocking sets the optional use_locking attribute to value.
19452//
19453// value: If `True`, updating of the var and accum tensors will be protected
19454// by a lock; otherwise the behavior is undefined, but may exhibit less
19455// contention.
19456// If not specified, defaults to false
19457func ResourceSparseApplyFtrlUseLocking(value bool) ResourceSparseApplyFtrlAttr {
19458	return func(m optionalAttr) {
19459		m["use_locking"] = value
19460	}
19461}
19462
19463// Update relevant entries in '*var' according to the Ftrl-proximal scheme.
19464//
19465// That is, for rows for which we have grad, we update var, accum and linear as follows:
19466// accum_new = accum + grad * grad
19467// linear += grad + (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
19468// quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
19469// var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
19470// accum = accum_new
19471//
19472// Arguments:
19473//	var_: Should be from a Variable().
19474//	accum: Should be from a Variable().
19475//	linear: Should be from a Variable().
19476//	grad: The gradient.
19477//	indices: A vector of indices into the first dimension of var and accum.
19478//	lr: Scaling factor. Must be a scalar.
19479//	l1: L1 regularization. Must be a scalar.
19480//	l2: L2 regularization. Must be a scalar.
19481//	lr_power: Scaling factor. Must be a scalar.
19482//
19483// Returns the created operation.
19484func ResourceSparseApplyFtrl(scope *Scope, var_ tf.Output, accum tf.Output, linear tf.Output, grad tf.Output, indices tf.Output, lr tf.Output, l1 tf.Output, l2 tf.Output, lr_power tf.Output, optional ...ResourceSparseApplyFtrlAttr) (o *tf.Operation) {
19485	if scope.Err() != nil {
19486		return
19487	}
19488	attrs := map[string]interface{}{}
19489	for _, a := range optional {
19490		a(attrs)
19491	}
19492	opspec := tf.OpSpec{
19493		Type: "ResourceSparseApplyFtrl",
19494		Input: []tf.Input{
19495			var_, accum, linear, grad, indices, lr, l1, l2, lr_power,
19496		},
19497		Attrs: attrs,
19498	}
19499	return scope.AddOperation(opspec)
19500}
19501
19502// Returns which elements of x are Inf.
19503//
19504// @compatibility(numpy)
19505// Equivalent to np.isinf
19506// @end_compatibility
19507func IsInf(scope *Scope, x tf.Output) (y tf.Output) {
19508	if scope.Err() != nil {
19509		return
19510	}
19511	opspec := tf.OpSpec{
19512		Type: "IsInf",
19513		Input: []tf.Input{
19514			x,
19515		},
19516	}
19517	op := scope.AddOperation(opspec)
19518	return op.Output(0)
19519}
19520
19521// Gather ragged slices from `params` axis `0` according to `indices`.
19522//
19523// Outputs a `RaggedTensor` output composed from `output_dense_values` and
19524// `output_nested_splits`, such that:
19525//
19526// ```python
19527// output.shape = indices.shape + params.shape[1:]
19528// output.ragged_rank = indices.shape.ndims + params.ragged_rank
19529// output[i...j, d0...dn] = params[indices[i...j], d0...dn]
19530// ```
19531//
19532// where
19533//
19534// * `params =
19535//    ragged.from_nested_row_splits(params_dense_values, params_nested_splits)`
19536//    provides the values that should be gathered.
19537// * `indices` is a dense tensor with dtype `int32` or `int64`, indicating which
19538//    values should be gathered.
19539// * `output =
19540//    ragged.from_nested_row_splits(output_dense_values, output_nested_splits)`
19541//    is the output tensor.
19542//
19543// (Note: This C++ op is used to implement the higher-level Python
19544// `tf.ragged.gather` op, which also supports ragged indices.)
19545//
19546//
19547// Arguments:
19548//	params_nested_splits: The `nested_row_splits` tensors that define the row-partitioning for the
19549// `params` RaggedTensor input.
19550//	params_dense_values: The `flat_values` for the `params` RaggedTensor. There was a terminology change
19551// at the Python level from dense_values to flat_values, so dense_values is the
19552// deprecated name.
19553//	indices: Indices in the outermost dimension of `params` of the values that should be
19554// gathered.
19555//	OUTPUT_RAGGED_RANK: The ragged rank of the output RaggedTensor. `output_nested_splits` will contain
19556// this number of `row_splits` tensors. This value should equal
19557// `indices.shape.ndims + params.ragged_rank - 1`.
19558//
19559// Returns The `nested_row_splits` tensors that define the row-partitioning for the
19560// returned RaggedTensor, and the `flat_values` for the returned RaggedTensor.
19561func RaggedGather(scope *Scope, params_nested_splits []tf.Output, params_dense_values tf.Output, indices tf.Output, OUTPUT_RAGGED_RANK int64) (output_nested_splits []tf.Output, output_dense_values tf.Output) {
19562	if scope.Err() != nil {
19563		return
19564	}
19565	attrs := map[string]interface{}{"OUTPUT_RAGGED_RANK": OUTPUT_RAGGED_RANK}
19566	opspec := tf.OpSpec{
19567		Type: "RaggedGather",
19568		Input: []tf.Input{
19569			tf.OutputList(params_nested_splits), params_dense_values, indices,
19570		},
19571		Attrs: attrs,
19572	}
19573	op := scope.AddOperation(opspec)
19574	if scope.Err() != nil {
19575		return
19576	}
19577	var idx int
19578	var err error
19579	if output_nested_splits, idx, err = makeOutputList(op, idx, "output_nested_splits"); err != nil {
19580		scope.UpdateErr("RaggedGather", err)
19581		return
19582	}
19583	output_dense_values = op.Output(idx)
19584	return output_nested_splits, output_dense_values
19585}
19586
19587// Greedily selects a subset of bounding boxes in descending order of score,
19588//
19589// pruning away boxes that have high intersection-over-union (IOU) overlap
19590// with previously selected boxes.  Bounding boxes are supplied as
19591// [y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any
19592// diagonal pair of box corners and the coordinates can be provided as normalized
19593// (i.e., lying in the interval [0, 1]) or absolute.  Note that this algorithm
19594// is agnostic to where the origin is in the coordinate system.  Note that this
19595// algorithm is invariant to orthogonal transformations and translations
19596// of the coordinate system; thus translating or reflections of the coordinate
19597// system result in the same boxes being selected by the algorithm.
19598//
19599// The output of this operation is a set of integers indexing into the input
19600// collection of bounding boxes representing the selected boxes.  The bounding
19601// box coordinates corresponding to the selected indices can then be obtained
19602// using the `tf.gather` operation.  For example:
19603//
19604//   selected_indices = tf.image.non_max_suppression_v2(
19605//       boxes, scores, max_output_size, iou_threshold)
19606//   selected_boxes = tf.gather(boxes, selected_indices)
19607//
19608// Arguments:
19609//	boxes: A 2-D float tensor of shape `[num_boxes, 4]`.
19610//	scores: A 1-D float tensor of shape `[num_boxes]` representing a single
19611// score corresponding to each box (each row of boxes).
19612//	max_output_size: A scalar integer tensor representing the maximum number of
19613// boxes to be selected by non max suppression.
19614//	iou_threshold: A 0-D float tensor representing the threshold for deciding whether
19615// boxes overlap too much with respect to IOU.
19616//
19617// Returns A 1-D integer tensor of shape `[M]` representing the selected
19618// indices from the boxes tensor, where `M <= max_output_size`.
19619func NonMaxSuppressionV2(scope *Scope, boxes tf.Output, scores tf.Output, max_output_size tf.Output, iou_threshold tf.Output) (selected_indices tf.Output) {
19620	if scope.Err() != nil {
19621		return
19622	}
19623	opspec := tf.OpSpec{
19624		Type: "NonMaxSuppressionV2",
19625		Input: []tf.Input{
19626			boxes, scores, max_output_size, iou_threshold,
19627		},
19628	}
19629	op := scope.AddOperation(opspec)
19630	return op.Output(0)
19631}
19632
19633// TruncatedNormalAttr is an optional argument to TruncatedNormal.
19634type TruncatedNormalAttr func(optionalAttr)
19635
19636// TruncatedNormalSeed sets the optional seed attribute to value.
19637//
19638// value: If either `seed` or `seed2` are set to be non-zero, the random number
19639// generator is seeded by the given seed.  Otherwise, it is seeded by a
19640// random seed.
19641// If not specified, defaults to 0
19642func TruncatedNormalSeed(value int64) TruncatedNormalAttr {
19643	return func(m optionalAttr) {
19644		m["seed"] = value
19645	}
19646}
19647
19648// TruncatedNormalSeed2 sets the optional seed2 attribute to value.
19649//
19650// value: A second seed to avoid seed collision.
19651// If not specified, defaults to 0
19652func TruncatedNormalSeed2(value int64) TruncatedNormalAttr {
19653	return func(m optionalAttr) {
19654		m["seed2"] = value
19655	}
19656}
19657
19658// Outputs random values from a truncated normal distribution.
19659//
19660// The generated values follow a normal distribution with mean 0 and standard
19661// deviation 1, except that values whose magnitude is more than 2 standard
19662// deviations from the mean are dropped and re-picked.
19663//
19664// Arguments:
19665//	shape: The shape of the output tensor.
19666//	dtype: The type of the output.
19667//
19668// Returns A tensor of the specified shape filled with random truncated normal
19669// values.
19670func TruncatedNormal(scope *Scope, shape tf.Output, dtype tf.DataType, optional ...TruncatedNormalAttr) (output tf.Output) {
19671	if scope.Err() != nil {
19672		return
19673	}
19674	attrs := map[string]interface{}{"dtype": dtype}
19675	for _, a := range optional {
19676		a(attrs)
19677	}
19678	opspec := tf.OpSpec{
19679		Type: "TruncatedNormal",
19680		Input: []tf.Input{
19681			shape,
19682		},
19683		Attrs: attrs,
19684	}
19685	op := scope.AddOperation(opspec)
19686	return op.Output(0)
19687}
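
// exampleTruncatedNormal is an editorial sketch, not generated code, showing
// how the optional seed attributes make the op deterministic.
func exampleTruncatedNormal() {
	s := NewScope()
	shape := Const(s, []int32{2, 3})
	out := TruncatedNormal(s, shape, tf.Float,
		TruncatedNormalSeed(1), TruncatedNormalSeed2(2))
	_ = out // a deterministic [2, 3] float32 tensor
}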
19688
19689// StringToNumberAttr is an optional argument to StringToNumber.
19690type StringToNumberAttr func(optionalAttr)
19691
19692// StringToNumberOutType sets the optional out_type attribute to value.
19693//
19694// value: The numeric type to interpret each string in `string_tensor` as.
19695// If not specified, defaults to DT_FLOAT
19696func StringToNumberOutType(value tf.DataType) StringToNumberAttr {
19697	return func(m optionalAttr) {
19698		m["out_type"] = value
19699	}
19700}
19701
19702// Converts each string in the input Tensor to the specified numeric type.
19703//
19704// (Note that int32 overflow results in an error while float overflow
19705// results in a rounded value.)
19706//
19707// Returns A Tensor of the same shape as the input `string_tensor`.
19708func StringToNumber(scope *Scope, string_tensor tf.Output, optional ...StringToNumberAttr) (output tf.Output) {
19709	if scope.Err() != nil {
19710		return
19711	}
19712	attrs := map[string]interface{}{}
19713	for _, a := range optional {
19714		a(attrs)
19715	}
19716	opspec := tf.OpSpec{
19717		Type: "StringToNumber",
19718		Input: []tf.Input{
19719			string_tensor,
19720		},
19721		Attrs: attrs,
19722	}
19723	op := scope.AddOperation(opspec)
19724	return op.Output(0)
19725}
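
// exampleStringToNumber is an editorial sketch, not generated code, showing
// the optional out_type attribute overriding the DT_FLOAT default.
func exampleStringToNumber() {
	s := NewScope()
	strs := Const(s, []string{"42", "-7"})
	nums := StringToNumber(s, strs, StringToNumberOutType(tf.Int32)) // [42, -7]
	_ = nums
}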
19726
19727// ResourceApplyFtrlV2Attr is an optional argument to ResourceApplyFtrlV2.
19728type ResourceApplyFtrlV2Attr func(optionalAttr)
19729
19730// ResourceApplyFtrlV2UseLocking sets the optional use_locking attribute to value.
19731//
19732// value: If `True`, updating of the var and accum tensors will be protected
19733// by a lock; otherwise the behavior is undefined, but may exhibit less
19734// contention.
19735// If not specified, defaults to false
19736func ResourceApplyFtrlV2UseLocking(value bool) ResourceApplyFtrlV2Attr {
19737	return func(m optionalAttr) {
19738		m["use_locking"] = value
19739	}
19740}
19741
19742// Update '*var' according to the Ftrl-proximal scheme.
19743//
19744// grad_with_shrinkage = grad + 2 * l2_shrinkage * var
19745// accum_new = accum + grad_with_shrinkage * grad_with_shrinkage
19746// linear += grad_with_shrinkage +
19747//     (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
19748// quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
19749// var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
19750// accum = accum_new
19751//
19752// Arguments:
19753//	var_: Should be from a Variable().
19754//	accum: Should be from a Variable().
19755//	linear: Should be from a Variable().
19756//	grad: The gradient.
19757//	lr: Scaling factor. Must be a scalar.
19758//	l1: L1 regularization. Must be a scalar.
19759//	l2: L2 shrinkage regularization. Must be a scalar.
19760//	l2_shrinkage: The L2 shrinkage coefficient used to compute grad_with_shrinkage above. Must be a scalar.
19761//	lr_power: Scaling factor. Must be a scalar.
19762//
19763// Returns the created operation.
19764func ResourceApplyFtrlV2(scope *Scope, var_ tf.Output, accum tf.Output, linear tf.Output, grad tf.Output, lr tf.Output, l1 tf.Output, l2 tf.Output, l2_shrinkage tf.Output, lr_power tf.Output, optional ...ResourceApplyFtrlV2Attr) (o *tf.Operation) {
19765	if scope.Err() != nil {
19766		return
19767	}
19768	attrs := map[string]interface{}{}
19769	for _, a := range optional {
19770		a(attrs)
19771	}
19772	opspec := tf.OpSpec{
19773		Type: "ResourceApplyFtrlV2",
19774		Input: []tf.Input{
19775			var_, accum, linear, grad, lr, l1, l2, l2_shrinkage, lr_power,
19776		},
19777		Attrs: attrs,
19778	}
19779	return scope.AddOperation(opspec)
19780}
19781
19782// SkipgramAttr is an optional argument to Skipgram.
19783type SkipgramAttr func(optionalAttr)
19784
19785// SkipgramWindowSize sets the optional window_size attribute to value.
19786//
19787// value: The number of words to predict to the left and right of the target.
19788// If not specified, defaults to 5
19789func SkipgramWindowSize(value int64) SkipgramAttr {
19790	return func(m optionalAttr) {
19791		m["window_size"] = value
19792	}
19793}
19794
19795// SkipgramMinCount sets the optional min_count attribute to value.
19796//
19797// value: The minimum number of word occurrences for it to be included in the
19798// vocabulary.
19799// If not specified, defaults to 5
19800func SkipgramMinCount(value int64) SkipgramAttr {
19801	return func(m optionalAttr) {
19802		m["min_count"] = value
19803	}
19804}
19805
19806// SkipgramSubsample sets the optional subsample attribute to value.
19807//
19808// value: Threshold for word occurrence. Words that appear with higher
19809// frequency will be randomly down-sampled. Set to 0 to disable.
19810// If not specified, defaults to 0.001
19811func SkipgramSubsample(value float32) SkipgramAttr {
19812	return func(m optionalAttr) {
19813		m["subsample"] = value
19814	}
19815}
19816
19817// Parses a text file and creates a batch of examples.
19818//
19819// DEPRECATED at GraphDef version 19: Moving word2vec into tensorflow_models/tutorials and deprecating its ops here as a result
19820//
19821// Arguments:
19822//	filename: The corpus's text file name.
19823//	batch_size: The size of produced batch.
19824//
19825// Returns A vector of words in the corpus; frequencies of words, sorted in non-ascending order; the number of words per epoch in the data file; the current epoch number; the total number of words processed so far; a vector of example word ids; and a vector of label word ids.
19826func Skipgram(scope *Scope, filename string, batch_size int64, optional ...SkipgramAttr) (vocab_word tf.Output, vocab_freq tf.Output, words_per_epoch tf.Output, current_epoch tf.Output, total_words_processed tf.Output, examples tf.Output, labels tf.Output) {
19827	if scope.Err() != nil {
19828		return
19829	}
19830	attrs := map[string]interface{}{"filename": filename, "batch_size": batch_size}
19831	for _, a := range optional {
19832		a(attrs)
19833	}
19834	opspec := tf.OpSpec{
19835		Type: "Skipgram",
19836
19837		Attrs: attrs,
19838	}
19839	op := scope.AddOperation(opspec)
19840	return op.Output(0), op.Output(1), op.Output(2), op.Output(3), op.Output(4), op.Output(5), op.Output(6)
19841}
19842
19843// ThreadUnsafeUnigramCandidateSamplerAttr is an optional argument to ThreadUnsafeUnigramCandidateSampler.
19844type ThreadUnsafeUnigramCandidateSamplerAttr func(optionalAttr)
19845
19846// ThreadUnsafeUnigramCandidateSamplerSeed sets the optional seed attribute to value.
19847//
19848// value: If either seed or seed2 are set to be non-zero, the random number
19849// generator is seeded by the given seed.  Otherwise, it is seeded by a
19850// random seed.
19851// If not specified, defaults to 0
19852func ThreadUnsafeUnigramCandidateSamplerSeed(value int64) ThreadUnsafeUnigramCandidateSamplerAttr {
19853	return func(m optionalAttr) {
19854		m["seed"] = value
19855	}
19856}
19857
19858// ThreadUnsafeUnigramCandidateSamplerSeed2 sets the optional seed2 attribute to value.
19859//
19860// value: A second seed to avoid seed collision.
19861// If not specified, defaults to 0
19862func ThreadUnsafeUnigramCandidateSamplerSeed2(value int64) ThreadUnsafeUnigramCandidateSamplerAttr {
19863	return func(m optionalAttr) {
19864		m["seed2"] = value
19865	}
19866}
19867
19868// Generates labels for candidate sampling with a learned unigram distribution.
19869//
19870// See explanations of candidate sampling and the data formats at
19871// go/candidate-sampling.
19872//
19873// For each batch, this op picks a single set of sampled candidate labels.
19874//
19875// The advantages of sampling candidates per-batch are simplicity and the
19876// possibility of efficient dense matrix multiplication. The disadvantage is that
19877// the sampled candidates must be chosen independently of the context and of the
19878// true labels.
19879//
19880// Arguments:
19881//	true_classes: A batch_size * num_true matrix, in which each row contains the
19882// IDs of the num_true target_classes in the corresponding original label.
19883//	num_true: Number of true labels per context.
19884//	num_sampled: Number of candidates to randomly sample.
19885//	unique: If unique is true, we sample with rejection, so that all sampled
19886// candidates in a batch are unique. This requires some approximation to
19887// estimate the post-rejection sampling probabilities.
19888//	range_max: The sampler will sample integers from the interval [0, range_max).
19889//
19890// Returns A vector of length num_sampled, in which each element is
19891// the ID of a sampled candidate; a batch_size * num_true matrix representing
19892// the number of times each candidate is expected to occur in a batch
19893// of sampled candidates (if unique=true, then this is a probability); and a
19894// vector of length num_sampled, for each sampled candidate representing the
19895// number of times the candidate is expected to occur in a batch of sampled
19896// candidates (if unique=true, then this is a probability).
19897func ThreadUnsafeUnigramCandidateSampler(scope *Scope, true_classes tf.Output, num_true int64, num_sampled int64, unique bool, range_max int64, optional ...ThreadUnsafeUnigramCandidateSamplerAttr) (sampled_candidates tf.Output, true_expected_count tf.Output, sampled_expected_count tf.Output) {
19898	if scope.Err() != nil {
19899		return
19900	}
19901	attrs := map[string]interface{}{"num_true": num_true, "num_sampled": num_sampled, "unique": unique, "range_max": range_max}
19902	for _, a := range optional {
19903		a(attrs)
19904	}
19905	opspec := tf.OpSpec{
19906		Type: "ThreadUnsafeUnigramCandidateSampler",
19907		Input: []tf.Input{
19908			true_classes,
19909		},
19910		Attrs: attrs,
19911	}
19912	op := scope.AddOperation(opspec)
19913	return op.Output(0), op.Output(1), op.Output(2)
19914}
19915
19916// MaxPoolV2Attr is an optional argument to MaxPoolV2.
19917type MaxPoolV2Attr func(optionalAttr)
19918
19919// MaxPoolV2DataFormat sets the optional data_format attribute to value.
19920//
19921// value: Specify the data format of the input and output data. With the
19922// default format "NHWC", the data is stored in the order of:
19923//     [batch, in_height, in_width, in_channels].
19924// Alternatively, the format could be "NCHW", the data storage order of:
19925//     [batch, in_channels, in_height, in_width].
19926// If not specified, defaults to "NHWC"
19927func MaxPoolV2DataFormat(value string) MaxPoolV2Attr {
19928	return func(m optionalAttr) {
19929		m["data_format"] = value
19930	}
19931}
19932
19933// Performs max pooling on the input.
19934//
19935// Arguments:
19936//	input: 4-D input to pool over.
19937//	ksize: The size of the window for each dimension of the input tensor.
19938//	strides: The stride of the sliding window for each dimension of the
19939// input tensor.
19940//	padding: The type of padding algorithm to use.
19941//
19942// Returns The max pooled output tensor.
19943func MaxPoolV2(scope *Scope, input tf.Output, ksize tf.Output, strides tf.Output, padding string, optional ...MaxPoolV2Attr) (output tf.Output) {
19944	if scope.Err() != nil {
19945		return
19946	}
19947	attrs := map[string]interface{}{"padding": padding}
19948	for _, a := range optional {
19949		a(attrs)
19950	}
19951	opspec := tf.OpSpec{
19952		Type: "MaxPoolV2",
19953		Input: []tf.Input{
19954			input, ksize, strides,
19955		},
19956		Attrs: attrs,
19957	}
19958	op := scope.AddOperation(opspec)
19959	return op.Output(0)
19960}
19961
19962// Does nothing. Serves as a control trigger for scheduling.
19963//
19964// Only useful as a placeholder for control edges.
19965//
19966// Returns the created operation.
19967func ControlTrigger(scope *Scope) (o *tf.Operation) {
19968	if scope.Err() != nil {
19969		return
19970	}
19971	opspec := tf.OpSpec{
19972		Type: "ControlTrigger",
19973	}
19974	return scope.AddOperation(opspec)
19975}
19976
19977// Deprecated. Use TensorArrayReadV3
19978//
19979// DEPRECATED at GraphDef version 26: Use TensorArrayReadV3
19980func TensorArrayReadV2(scope *Scope, handle tf.Output, index tf.Output, flow_in tf.Output, dtype tf.DataType) (value tf.Output) {
19981	if scope.Err() != nil {
19982		return
19983	}
19984	attrs := map[string]interface{}{"dtype": dtype}
19985	opspec := tf.OpSpec{
19986		Type: "TensorArrayReadV2",
19987		Input: []tf.Input{
19988			handle, index, flow_in,
19989		},
19990		Attrs: attrs,
19991	}
19992	op := scope.AddOperation(opspec)
19993	return op.Output(0)
19994}
19995
19996// Batch normalization.
19997//
19998// DEPRECATED at GraphDef version 9: Use tf.nn.batch_normalization()
19999//
20000// This op is deprecated. Prefer `tf.nn.batch_normalization`.
20001//
20002// Arguments:
20003//	t: A 4D input Tensor.
20004//	m: A 1D mean Tensor with size matching the last dimension of t.
20005// This is the first output from tf.nn.moments,
20006// or a saved moving average thereof.
20007//	v: A 1D variance Tensor with size matching the last dimension of t.
20008// This is the second output from tf.nn.moments,
20009// or a saved moving average thereof.
20010//	beta: A 1D beta Tensor with size matching the last dimension of t.
20011// An offset to be added to the normalized tensor.
20012//	gamma: A 1D gamma Tensor with size matching the last dimension of t.
20013// If "scale_after_normalization" is true, this tensor will be multiplied
20014// with the normalized tensor.
20015//	variance_epsilon: A small float number to avoid dividing by 0.
20016//	scale_after_normalization: A bool indicating whether the resulting tensor
20017// needs to be multiplied with gamma.
20018func BatchNormWithGlobalNormalization(scope *Scope, t tf.Output, m tf.Output, v tf.Output, beta tf.Output, gamma tf.Output, variance_epsilon float32, scale_after_normalization bool) (result tf.Output) {
20019	if scope.Err() != nil {
20020		return
20021	}
20022	attrs := map[string]interface{}{"variance_epsilon": variance_epsilon, "scale_after_normalization": scale_after_normalization}
20023	opspec := tf.OpSpec{
20024		Type: "BatchNormWithGlobalNormalization",
20025		Input: []tf.Input{
20026			t, m, v, beta, gamma,
20027		},
20028		Attrs: attrs,
20029	}
20030	op := scope.AddOperation(opspec)
20031	return op.Output(0)
20032}
20033
20034// AddManySparseToTensorsMapAttr is an optional argument to AddManySparseToTensorsMap.
20035type AddManySparseToTensorsMapAttr func(optionalAttr)
20036
20037// AddManySparseToTensorsMapContainer sets the optional container attribute to value.
20038//
20039// value: The container name for the `SparseTensorsMap` created by this op.
20040// If not specified, defaults to ""
20041func AddManySparseToTensorsMapContainer(value string) AddManySparseToTensorsMapAttr {
20042	return func(m optionalAttr) {
20043		m["container"] = value
20044	}
20045}
20046
20047// AddManySparseToTensorsMapSharedName sets the optional shared_name attribute to value.
20048//
20049// value: The shared name for the `SparseTensorsMap` created by this op.
20050// If blank, the new Operation's unique name is used.
20051// If not specified, defaults to ""
20052func AddManySparseToTensorsMapSharedName(value string) AddManySparseToTensorsMapAttr {
20053	return func(m optionalAttr) {
20054		m["shared_name"] = value
20055	}
20056}
20057
20058// Add an `N`-minibatch `SparseTensor` to a `SparseTensorsMap`, return `N` handles.
20059//
20060// A `SparseTensor` of rank `R` is represented by three tensors: `sparse_indices`,
20061// `sparse_values`, and `sparse_shape`, where
20062//
20063// ```sparse_indices.shape[1] == sparse_shape.shape[0] == R```
20064//
20065// An `N`-minibatch of `SparseTensor` objects is represented as a `SparseTensor`
20066// having a first `sparse_indices` column taking values between `[0, N)`, where
20067// the minibatch size `N == sparse_shape[0]`.
20068//
20069// The input `SparseTensor` must have rank `R` greater than 1, and the first
20070// dimension is treated as the minibatch dimension.  Elements of the `SparseTensor`
20071// must be sorted in increasing order of this first dimension.  The stored
20072// `SparseTensor` objects pointed to by each row of the output `sparse_handles`
20073// will have rank `R-1`.
20074//
20075// The `SparseTensor` values can then be read out as part of a minibatch by passing
20076// the given keys as vector elements to `TakeManySparseFromTensorsMap`.  To ensure
20077// the correct `SparseTensorsMap` is accessed, ensure that the same
20078// `container` and `shared_name` are passed to that Op.  If no `shared_name`
20079// is provided here, instead use the *name* of the Operation created by calling
20080// `AddManySparseToTensorsMap` as the `shared_name` passed to
20081// `TakeManySparseFromTensorsMap`.  Ensure the Operations are colocated.
20082//
20083// Arguments:
20084//	sparse_indices: 2-D.  The `indices` of the minibatch `SparseTensor`.
20085// `sparse_indices[:, 0]` must be ordered values in `[0, N)`.
20086//	sparse_values: 1-D.  The `values` of the minibatch `SparseTensor`.
20087//	sparse_shape: 1-D.  The `shape` of the minibatch `SparseTensor`.
20088// The minibatch size `N == sparse_shape[0]`.
20089//
20090// Returns 1-D.  The handles of the `SparseTensor` now stored in the
20091// `SparseTensorsMap`.  Shape: `[N]`.
20092func AddManySparseToTensorsMap(scope *Scope, sparse_indices tf.Output, sparse_values tf.Output, sparse_shape tf.Output, optional ...AddManySparseToTensorsMapAttr) (sparse_handles tf.Output) {
20093	if scope.Err() != nil {
20094		return
20095	}
20096	attrs := map[string]interface{}{}
20097	for _, a := range optional {
20098		a(attrs)
20099	}
20100	opspec := tf.OpSpec{
20101		Type: "AddManySparseToTensorsMap",
20102		Input: []tf.Input{
20103			sparse_indices, sparse_values, sparse_shape,
20104		},
20105		Attrs: attrs,
20106	}
20107	op := scope.AddOperation(opspec)
20108	return op.Output(0)
20109}
20110
20111// TPUReplicateMetadataAttr is an optional argument to TPUReplicateMetadata.
20112type TPUReplicateMetadataAttr func(optionalAttr)
20113
20114// TPUReplicateMetadataNumCoresPerReplica sets the optional num_cores_per_replica attribute to value.
20115//
20116// value: Number of cores per replica. Used for model parallelism.
20117// If not specified, defaults to 1
20118func TPUReplicateMetadataNumCoresPerReplica(value int64) TPUReplicateMetadataAttr {
20119	return func(m optionalAttr) {
20120		m["num_cores_per_replica"] = value
20121	}
20122}
20123
20124// TPUReplicateMetadataTopology sets the optional topology attribute to value.
20125//
20126// value: TopologyProto indicating the topology of the TPU pod slice.
20127// If not specified, defaults to ""
20128func TPUReplicateMetadataTopology(value string) TPUReplicateMetadataAttr {
20129	return func(m optionalAttr) {
20130		m["topology"] = value
20131	}
20132}
20133
20134// TPUReplicateMetadataUseTpu sets the optional use_tpu attribute to value.
20135//
20136// value: Whether to place the computation on the TPU.
20137// If not specified, defaults to true
20138func TPUReplicateMetadataUseTpu(value bool) TPUReplicateMetadataAttr {
20139	return func(m optionalAttr) {
20140		m["use_tpu"] = value
20141	}
20142}
20143
20144// TPUReplicateMetadataDeviceAssignment sets the optional device_assignment attribute to value.
20145//
20146// value: The assignment of devices for the computation.
20147// If not specified, defaults to <>
20148func TPUReplicateMetadataDeviceAssignment(value []int64) TPUReplicateMetadataAttr {
20149	return func(m optionalAttr) {
20150		m["device_assignment"] = value
20151	}
20152}
20153
20154// TPUReplicateMetadataComputationShape sets the optional computation_shape attribute to value.
20155//
20156// value: DEPRECATED. Use num_cores_per_replica instead.
20157// If not specified, defaults to <>
20158func TPUReplicateMetadataComputationShape(value []int64) TPUReplicateMetadataAttr {
20159	return func(m optionalAttr) {
20160		m["computation_shape"] = value
20161	}
20162}
20163
20164// TPUReplicateMetadataHostComputeCore sets the optional host_compute_core attribute to value.
20165// If not specified, defaults to <>
20166func TPUReplicateMetadataHostComputeCore(value []string) TPUReplicateMetadataAttr {
20167	return func(m optionalAttr) {
20168		m["host_compute_core"] = value
20169	}
20170}
20171
20172// TPUReplicateMetadataPaddingMap sets the optional padding_map attribute to value.
20173// If not specified, defaults to <>
20174func TPUReplicateMetadataPaddingMap(value []string) TPUReplicateMetadataAttr {
20175	return func(m optionalAttr) {
20176		m["padding_map"] = value
20177	}
20178}
20179
20180// TPUReplicateMetadataStepMarkerLocation sets the optional step_marker_location attribute to value.
20181// If not specified, defaults to "STEP_MARK_AT_ENTRY"
20182func TPUReplicateMetadataStepMarkerLocation(value string) TPUReplicateMetadataAttr {
20183	return func(m optionalAttr) {
20184		m["step_marker_location"] = value
20185	}
20186}
20187
20188// Metadata indicating how the TPU computation should be replicated.
20189//
20190// Arguments:
20191//	num_replicas: Number of replicas of the computation
20192//
20193// Returns the created operation.
20194func TPUReplicateMetadata(scope *Scope, num_replicas int64, optional ...TPUReplicateMetadataAttr) (o *tf.Operation) {
20195	if scope.Err() != nil {
20196		return
20197	}
20198	attrs := map[string]interface{}{"num_replicas": num_replicas}
20199	for _, a := range optional {
20200		a(attrs)
20201	}
20202	opspec := tf.OpSpec{
20203		Type: "TPUReplicateMetadata",
20204
20205		Attrs: attrs,
20206	}
20207	return scope.AddOperation(opspec)
20208}
20209
20210// LoadTPUEmbeddingFTRLParametersGradAccumDebugAttr is an optional argument to LoadTPUEmbeddingFTRLParametersGradAccumDebug.
20211type LoadTPUEmbeddingFTRLParametersGradAccumDebugAttr func(optionalAttr)
20212
20213// LoadTPUEmbeddingFTRLParametersGradAccumDebugTableId sets the optional table_id attribute to value.
20214// If not specified, defaults to -1
20215//
20216// REQUIRES: value >= -1
20217func LoadTPUEmbeddingFTRLParametersGradAccumDebugTableId(value int64) LoadTPUEmbeddingFTRLParametersGradAccumDebugAttr {
20218	return func(m optionalAttr) {
20219		m["table_id"] = value
20220	}
20221}
20222
20223// LoadTPUEmbeddingFTRLParametersGradAccumDebugTableName sets the optional table_name attribute to value.
20224// If not specified, defaults to ""
20225func LoadTPUEmbeddingFTRLParametersGradAccumDebugTableName(value string) LoadTPUEmbeddingFTRLParametersGradAccumDebugAttr {
20226	return func(m optionalAttr) {
20227		m["table_name"] = value
20228	}
20229}
20230
20231// Load FTRL embedding parameters with debug support.
20232//
20233// An op that loads optimization parameters into HBM for embedding. Must be
20234// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
20235// embedding table configuration. For example, this op is used to install
20236// parameters that are loaded from a checkpoint before a training loop is
20237// executed.
20238//
20239// Arguments:
20240//	parameters: Value of parameters used in the FTRL optimization algorithm.
20241//	accumulators: Value of accumulators used in the FTRL optimization algorithm.
20242//	linears: Value of linears used in the FTRL optimization algorithm.
20243//	gradient_accumulators: Value of gradient_accumulators used in the FTRL optimization algorithm.
20244//
20245//
20246//
20247// Returns the created operation.
20248func LoadTPUEmbeddingFTRLParametersGradAccumDebug(scope *Scope, parameters tf.Output, accumulators tf.Output, linears tf.Output, gradient_accumulators tf.Output, num_shards int64, shard_id int64, optional ...LoadTPUEmbeddingFTRLParametersGradAccumDebugAttr) (o *tf.Operation) {
20249	if scope.Err() != nil {
20250		return
20251	}
20252	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
20253	for _, a := range optional {
20254		a(attrs)
20255	}
20256	opspec := tf.OpSpec{
20257		Type: "LoadTPUEmbeddingFTRLParametersGradAccumDebug",
20258		Input: []tf.Input{
20259			parameters, accumulators, linears, gradient_accumulators,
20260		},
20261		Attrs: attrs,
20262	}
20263	return scope.AddOperation(opspec)
20264}
20265
20266// Concatenates tensors along one dimension.
20267//
20268// Arguments:
20269//	values: List of `N` Tensors to concatenate. Their ranks and types must match,
20270// and their sizes must match in all dimensions except `concat_dim`.
20271//	axis: 0-D.  The dimension along which to concatenate.  Must be in the
20272// range [-rank(values), rank(values)).
20273//
20274// Returns A `Tensor` with the concatenation of values stacked along the
20275// `concat_dim` dimension.  This tensor's shape matches that of `values` except
20276// in `concat_dim` where it has the sum of the sizes.
20277func ConcatV2(scope *Scope, values []tf.Output, axis tf.Output) (output tf.Output) {
20278	if scope.Err() != nil {
20279		return
20280	}
20281	opspec := tf.OpSpec{
20282		Type: "ConcatV2",
20283		Input: []tf.Input{
20284			tf.OutputList(values), axis,
20285		},
20286	}
20287	op := scope.AddOperation(opspec)
20288	return op.Output(0)
20289}
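
// exampleConcatV2 is an editorial sketch, not generated code: concatenating
// two [2, 2] tensors along axis 0 produces shape [4, 2]; axis 1 would
// produce [2, 4].
func exampleConcatV2() {
	s := NewScope()
	a := Const(s, [][]int32{{1, 2}, {3, 4}})
	b := Const(s, [][]int32{{5, 6}, {7, 8}})
	axis := Const(s, int32(0))
	out := ConcatV2(s, []tf.Output{a, b}, axis) // shape [4, 2]
	_ = out
}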
20290
20291// Reads and outputs the entire contents of the input filename.
20292func ReadFile(scope *Scope, filename tf.Output) (contents tf.Output) {
20293	if scope.Err() != nil {
20294		return
20295	}
20296	opspec := tf.OpSpec{
20297		Type: "ReadFile",
20298		Input: []tf.Input{
20299			filename,
20300		},
20301	}
20302	op := scope.AddOperation(opspec)
20303	return op.Output(0)
20304}
20305
20306// AvgPoolGradAttr is an optional argument to AvgPoolGrad.
20307type AvgPoolGradAttr func(optionalAttr)
20308
20309// AvgPoolGradDataFormat sets the optional data_format attribute to value.
20310//
20311// value: Specify the data format of the input and output data. With the
20312// default format "NHWC", the data is stored in the order of:
20313//     [batch, in_height, in_width, in_channels].
20314// Alternatively, the format could be "NCHW", the data storage order of:
20315//     [batch, in_channels, in_height, in_width].
20316// If not specified, defaults to "NHWC"
20317func AvgPoolGradDataFormat(value string) AvgPoolGradAttr {
20318	return func(m optionalAttr) {
20319		m["data_format"] = value
20320	}
20321}
20322
20323// Computes gradients of the average pooling function.
20324//
20325// Arguments:
20326//	orig_input_shape: 1-D.  Shape of the original input to `avg_pool`.
20327//	grad: 4-D with shape `[batch, height, width, channels]`.  Gradients w.r.t.
20328// the output of `avg_pool`.
20329//	ksize: The size of the sliding window for each dimension of the input.
20330//	strides: The stride of the sliding window for each dimension of the input.
20331//	padding: The type of padding algorithm to use.
20332//
20333// Returns 4-D.  Gradients w.r.t. the input of `avg_pool`.
20334func AvgPoolGrad(scope *Scope, orig_input_shape tf.Output, grad tf.Output, ksize []int64, strides []int64, padding string, optional ...AvgPoolGradAttr) (output tf.Output) {
20335	if scope.Err() != nil {
20336		return
20337	}
20338	attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
20339	for _, a := range optional {
20340		a(attrs)
20341	}
20342	opspec := tf.OpSpec{
20343		Type: "AvgPoolGrad",
20344		Input: []tf.Input{
20345			orig_input_shape, grad,
20346		},
20347		Attrs: attrs,
20348	}
20349	op := scope.AddOperation(opspec)
20350	return op.Output(0)
20351}
20352
20353// Greedily selects a subset of bounding boxes in descending order of score,
20354//
20355// pruning away boxes that have high overlaps
20356// with previously selected boxes.  Bounding boxes with score less than
20357// `score_threshold` are removed. N-by-n overlap values are supplied as a square matrix,
20358// which allows for defining a custom overlap criterion (e.g. intersection over union,
20359// intersection over area, etc.).
20360//
20361// The output of this operation is a set of integers indexing into the input
20362// collection of bounding boxes representing the selected boxes.  The bounding
20363// box coordinates corresponding to the selected indices can then be obtained
20364// using the `tf.gather` operation.  For example:
20365//
20366//   selected_indices = tf.image.non_max_suppression_with_overlaps(
20367//       overlaps, scores, max_output_size, overlap_threshold, score_threshold)
20368//   selected_boxes = tf.gather(boxes, selected_indices)
20369//
20370// Arguments:
20371//	overlaps: A 2-D float tensor of shape `[num_boxes, num_boxes]` representing
20372// the n-by-n box overlap values.
20373//	scores: A 1-D float tensor of shape `[num_boxes]` representing a single
20374// score corresponding to each box (each row of boxes).
20375//	max_output_size: A scalar integer tensor representing the maximum number of
20376// boxes to be selected by non max suppression.
20377//	overlap_threshold: A 0-D float tensor representing the threshold for deciding whether
20378// boxes overlap too much with respect to the provided overlap values.
20379//	score_threshold: A 0-D float tensor representing the threshold for deciding when to remove
20380// boxes based on score.
20381//
20382// Returns A 1-D integer tensor of shape `[M]` representing the selected
20383// indices from the boxes tensor, where `M <= max_output_size`.
20384func NonMaxSuppressionWithOverlaps(scope *Scope, overlaps tf.Output, scores tf.Output, max_output_size tf.Output, overlap_threshold tf.Output, score_threshold tf.Output) (selected_indices tf.Output) {
20385	if scope.Err() != nil {
20386		return
20387	}
20388	opspec := tf.OpSpec{
20389		Type: "NonMaxSuppressionWithOverlaps",
20390		Input: []tf.Input{
20391			overlaps, scores, max_output_size, overlap_threshold, score_threshold,
20392		},
20393	}
20394	op := scope.AddOperation(opspec)
20395	return op.Output(0)
20396}
20397
20398// FractionalAvgPoolGradAttr is an optional argument to FractionalAvgPoolGrad.
20399type FractionalAvgPoolGradAttr func(optionalAttr)
20400
20401// FractionalAvgPoolGradOverlapping sets the optional overlapping attribute to value.
20402//
20403// value: When set to True, the values at the boundary of adjacent pooling
20404// cells are used by both cells when pooling. For example:
20405//
20406// `index  0  1  2  3  4`
20407//
20408// `value  20 5  16 3  7`
20409//
20410// If the pooling sequence is [0, 2, 4], then 16, at index 2, will be used twice.
20411// The result would be [41/3, 26/3] for fractional avg pooling.
20412// If not specified, defaults to false
20413func FractionalAvgPoolGradOverlapping(value bool) FractionalAvgPoolGradAttr {
20414	return func(m optionalAttr) {
20415		m["overlapping"] = value
20416	}
20417}
20418
20419// Computes gradient of the FractionalAvgPool function.
20420//
20421// Unlike FractionalMaxPoolGrad, we don't need to find arg_max for
20422// FractionalAvgPoolGrad; we just need to evenly back-propagate each element of
20423// out_backprop to those indices that form the same pooling cell. Therefore, we
20424// just need to know the shape of original input tensor, instead of the whole
20425// tensor.
20426//
20427// Arguments:
20428//	orig_input_tensor_shape: Original input tensor shape for `fractional_avg_pool`
20429//	out_backprop: 4-D with shape `[batch, height, width, channels]`.  Gradients
20430// w.r.t. the output of `fractional_avg_pool`.
20431//	row_pooling_sequence: row pooling sequence, form pooling region with
20432// col_pooling_sequence.
20433//	col_pooling_sequence: column pooling sequence, form pooling region with
20434// row_pooling sequence.
20435//
20436// Returns 4-D.  Gradients w.r.t. the input of `fractional_avg_pool`.
func FractionalAvgPoolGrad(scope *Scope, orig_input_tensor_shape tf.Output, out_backprop tf.Output, row_pooling_sequence tf.Output, col_pooling_sequence tf.Output, optional ...FractionalAvgPoolGradAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "FractionalAvgPoolGrad",
		Input: []tf.Input{
			orig_input_tensor_shape, out_backprop, row_pooling_sequence, col_pooling_sequence,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
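
// Worked illustration of the `overlapping` attribute above (an editorial note,
// not generated documentation): with values {20, 5, 16, 3, 7} and pooling
// sequence [0, 2, 4], the overlapping cells cover indices 0..2 and 2..4, so
// the value at index 2 contributes to both averages.
//
//	cell1 := (20.0 + 5.0 + 16.0) / 3.0 // 41/3
//	cell2 := (16.0 + 3.0 + 7.0) / 3.0  // 26/3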

// StaticRegexReplaceAttr is an optional argument to StaticRegexReplace.
type StaticRegexReplaceAttr func(optionalAttr)

// StaticRegexReplaceReplaceGlobal sets the optional replace_global attribute to value.
//
// value: If True, the replacement is global; otherwise the replacement
// is done only on the first match.
// If not specified, defaults to true
func StaticRegexReplaceReplaceGlobal(value bool) StaticRegexReplaceAttr {
	return func(m optionalAttr) {
		m["replace_global"] = value
	}
}

// Replaces the match of pattern in input with rewrite.
//
// It follows the re2 syntax (https://github.com/google/re2/wiki/Syntax).
//
// Arguments:
//	input: The text to be processed.
//	pattern: The regular expression to match the input.
//	rewrite: The rewrite to be applied to the matched expression.
//
// Returns The text after applying pattern and rewrite.
func StaticRegexReplace(scope *Scope, input tf.Output, pattern string, rewrite string, optional ...StaticRegexReplaceAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"pattern": pattern, "rewrite": rewrite}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "StaticRegexReplace",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
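
// Example (illustrative sketch): applying StaticRegexReplace with the optional
// replace_global attribute. Assumes the Const helper from this package; the
// input strings are made-up assumptions.
//
//	s := op.NewScope()
//	text := op.Const(s, []string{"one fish two fish"})
//	// Replace only the first occurrence of "fish".
//	out := op.StaticRegexReplace(s, text, "fish", "cat",
//		op.StaticRegexReplaceReplaceGlobal(false))
//	_ = out // yields "one cat two fish" when the graph is run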

// Computes gradients for the exponential linear (Elu) operation.
//
// Arguments:
//	gradients: The backpropagated gradients to the corresponding Elu operation.
//	outputs: The outputs of the corresponding Elu operation.
//
// Returns The gradients: `gradients * (outputs + 1)` if outputs < 0,
// `gradients` otherwise.
func EluGrad(scope *Scope, gradients tf.Output, outputs tf.Output) (backprops tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "EluGrad",
		Input: []tf.Input{
			gradients, outputs,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Converts each string in the input Tensor to its hash mod by a number of buckets.
//
// The hash function is deterministic on the content of the string within the
// process.
//
// Note that the hash function may change from time to time.
// This functionality will be deprecated and it's recommended to use
// `tf.string_to_hash_bucket_fast()` or `tf.string_to_hash_bucket_strong()`.
//
// Arguments:
//
//	num_buckets: The number of buckets.
//
// Returns A Tensor of the same shape as the input `string_tensor`.
func StringToHashBucket(scope *Scope, string_tensor tf.Output, num_buckets int64) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_buckets": num_buckets}
	opspec := tf.OpSpec{
		Type: "StringToHashBucket",
		Input: []tf.Input{
			string_tensor,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
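
// Example (illustrative sketch): hashing strings into a fixed number of
// buckets. Assumes the Const helper from this package; the inputs are made up.
//
//	s := op.NewScope()
//	words := op.Const(s, []string{"lorem", "ipsum"})
//	buckets := op.StringToHashBucket(s, words, 100) // int64 bucket ids in [0, 100)
//	_ = buckets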

// Creates a dataset that batches `batch_size` elements from `input_dataset`.
//
// Arguments:
//
//	batch_size: A scalar representing the number of elements to accumulate in a batch.
//	drop_remainder: A scalar representing whether the last batch should be dropped in case its size
// is smaller than desired.
//
//
func BatchDatasetV2(scope *Scope, input_dataset tf.Output, batch_size tf.Output, drop_remainder tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
	opspec := tf.OpSpec{
		Type: "BatchDatasetV2",
		Input: []tf.Input{
			input_dataset, batch_size, drop_remainder,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
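
// Example (illustrative sketch): batching a dataset handle. The upstream
// dataset construction is elided; `ds` stands for any tf.Output holding a
// variant dataset, and the dtype/shape lists here are made-up assumptions.
//
//	s := op.NewScope()
//	batchSize := op.Const(s, int64(32))
//	dropRemainder := op.Const(s, false)
//	batched := op.BatchDatasetV2(s, ds, batchSize, dropRemainder,
//		[]tf.DataType{tf.Float}, []tf.Shape{tf.ScalarShape()})
//	_ = batched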

// Computes the gradient of `igamma(a, x)` wrt `a`.
func IgammaGradA(scope *Scope, a tf.Output, x tf.Output) (z tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "IgammaGradA",
		Input: []tf.Input{
			a, x,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Creates a dataset that contains `count` elements from the `input_dataset`.
//
// Arguments:
//
//	count: A scalar representing the number of elements from the `input_dataset`
// that should be taken. A value of `-1` indicates that all of `input_dataset`
// is taken.
//
//
func TakeDataset(scope *Scope, input_dataset tf.Output, count tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
	opspec := tf.OpSpec{
		Type: "TakeDataset",
		Input: []tf.Input{
			input_dataset, count,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// FakeQuantWithMinMaxVarsAttr is an optional argument to FakeQuantWithMinMaxVars.
type FakeQuantWithMinMaxVarsAttr func(optionalAttr)

// FakeQuantWithMinMaxVarsNumBits sets the optional num_bits attribute to value.
// If not specified, defaults to 8
func FakeQuantWithMinMaxVarsNumBits(value int64) FakeQuantWithMinMaxVarsAttr {
	return func(m optionalAttr) {
		m["num_bits"] = value
	}
}

// FakeQuantWithMinMaxVarsNarrowRange sets the optional narrow_range attribute to value.
// If not specified, defaults to false
func FakeQuantWithMinMaxVarsNarrowRange(value bool) FakeQuantWithMinMaxVarsAttr {
	return func(m optionalAttr) {
		m["narrow_range"] = value
	}
}

// Fake-quantize the 'inputs' tensor of type float via global float scalars `min`
//
// and `max` to 'outputs' tensor of same shape as `inputs`.
//
// `[min; max]` define the clamping range for the `inputs` data.
// `inputs` values are quantized into the quantization range (`[0; 2^num_bits - 1]`
// when `narrow_range` is false and `[1; 2^num_bits - 1]` when it is true) and
// then de-quantized and output as floats in `[min; max]` interval.
// `num_bits` is the bitwidth of the quantization; between 2 and 16, inclusive.
//
// This operation has a gradient and thus allows for training `min` and `max`
// values.
func FakeQuantWithMinMaxVars(scope *Scope, inputs tf.Output, min tf.Output, max tf.Output, optional ...FakeQuantWithMinMaxVarsAttr) (outputs tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "FakeQuantWithMinMaxVars",
		Input: []tf.Input{
			inputs, min, max,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
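
// Example (illustrative sketch): fake-quantizing activations with trainable
// min/max. Assumes the Const helper from this package; values are made up.
//
//	s := op.NewScope()
//	inputs := op.Const(s, []float32{-0.3, 0.1, 0.7})
//	min := op.Const(s, float32(-1))
//	max := op.Const(s, float32(1))
//	// Quantize to 8 bits over [min, max], then de-quantize back to floats.
//	outputs := op.FakeQuantWithMinMaxVars(s, inputs, min, max,
//		op.FakeQuantWithMinMaxVarsNumBits(8))
//	_ = outputs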

// RetrieveTPUEmbeddingMomentumParametersAttr is an optional argument to RetrieveTPUEmbeddingMomentumParameters.
type RetrieveTPUEmbeddingMomentumParametersAttr func(optionalAttr)

// RetrieveTPUEmbeddingMomentumParametersTableId sets the optional table_id attribute to value.
// If not specified, defaults to -1
//
// REQUIRES: value >= -1
func RetrieveTPUEmbeddingMomentumParametersTableId(value int64) RetrieveTPUEmbeddingMomentumParametersAttr {
	return func(m optionalAttr) {
		m["table_id"] = value
	}
}

// RetrieveTPUEmbeddingMomentumParametersTableName sets the optional table_name attribute to value.
// If not specified, defaults to ""
func RetrieveTPUEmbeddingMomentumParametersTableName(value string) RetrieveTPUEmbeddingMomentumParametersAttr {
	return func(m optionalAttr) {
		m["table_name"] = value
	}
}

// Retrieve Momentum embedding parameters.
//
// An op that retrieves optimization parameters from embedding to host
// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
// the correct embedding table configuration. For example, this op is
// used to retrieve updated parameters before saving a checkpoint.
//
// Returns Parameter parameters updated by the Momentum optimization algorithm,
// and parameter momenta updated by the Momentum optimization algorithm.
func RetrieveTPUEmbeddingMomentumParameters(scope *Scope, num_shards int64, shard_id int64, optional ...RetrieveTPUEmbeddingMomentumParametersAttr) (parameters tf.Output, momenta tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "RetrieveTPUEmbeddingMomentumParameters",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1)
}

// Forwards the value of an available tensor from `inputs` to `output`.
//
// `Merge` waits for at least one of the tensors in `inputs` to become available.
// It is usually combined with `Switch` to implement branching.
//
// `Merge` forwards the first tensor to become available to `output`, and sets
// `value_index` to its index in `inputs`.
//
// Arguments:
//	inputs: The input tensors, exactly one of which will become available.
//
// Returns the available input tensor, and the index of the chosen input
// tensor in `inputs`.
func Merge(scope *Scope, inputs []tf.Output) (output tf.Output, value_index tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Merge",
		Input: []tf.Input{
			tf.OutputList(inputs),
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1)
}
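
// Example (illustrative sketch): Merge is typically paired with Switch to
// implement conditional branches. `pred` and `data` stand for any tf.Output
// values; this shows only the wiring, not a full control-flow construction.
//
//	s := op.NewScope()
//	outFalse, outTrue := op.Switch(s, data, pred) // route data by predicate
//	merged, idx := op.Merge(s, []tf.Output{outFalse, outTrue})
//	_, _ = merged, idx // merged carries whichever branch actually ran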

// QueueCloseV2Attr is an optional argument to QueueCloseV2.
type QueueCloseV2Attr func(optionalAttr)

// QueueCloseV2CancelPendingEnqueues sets the optional cancel_pending_enqueues attribute to value.
//
// value: If true, all pending enqueue requests that are
// blocked on the given queue will be canceled.
// If not specified, defaults to false
func QueueCloseV2CancelPendingEnqueues(value bool) QueueCloseV2Attr {
	return func(m optionalAttr) {
		m["cancel_pending_enqueues"] = value
	}
}

// Closes the given queue.
//
// This operation signals that no more elements will be enqueued in the
// given queue. Subsequent Enqueue(Many) operations will fail.
// Subsequent Dequeue(Many) operations will continue to succeed if
// sufficient elements remain in the queue. Subsequent Dequeue(Many)
// operations that would block will fail immediately.
//
// Arguments:
//	handle: The handle to a queue.
//
// Returns the created operation.
func QueueCloseV2(scope *Scope, handle tf.Output, optional ...QueueCloseV2Attr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "QueueCloseV2",
		Input: []tf.Input{
			handle,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}

// Writes the given dataset to the given file using the TFRecord format.
//
// Arguments:
//	input_dataset: A variant tensor representing the dataset to write.
//	filename: A scalar string tensor representing the filename to use.
//	compression_type: A scalar string tensor containing either (i) the empty string (no
// compression), (ii) "ZLIB", or (iii) "GZIP".
//
// Returns the created operation.
func ExperimentalDatasetToTFRecord(scope *Scope, input_dataset tf.Output, filename tf.Output, compression_type tf.Output) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "ExperimentalDatasetToTFRecord",
		Input: []tf.Input{
			input_dataset, filename, compression_type,
		},
	}
	return scope.AddOperation(opspec)
}

// BiasAddGradAttr is an optional argument to BiasAddGrad.
type BiasAddGradAttr func(optionalAttr)

// BiasAddGradDataFormat sets the optional data_format attribute to value.
//
// value: Specify the data format of the input and output data. With the
// default format "NHWC", the bias tensor will be added to the last dimension
// of the value tensor.
// Alternatively, the format could be "NCHW", the data storage order of:
//     [batch, in_channels, in_height, in_width].
// The tensor will be added to "in_channels", the third-to-last dimension.
// If not specified, defaults to "NHWC"
func BiasAddGradDataFormat(value string) BiasAddGradAttr {
	return func(m optionalAttr) {
		m["data_format"] = value
	}
}

// The backward operation for "BiasAdd" on the "bias" tensor.
//
// It accumulates all the values from out_backprop into the feature dimension.
// For NHWC data format, the feature dimension is the last. For NCHW data format,
// the feature dimension is the third-to-last.
//
// Arguments:
//	out_backprop: Any number of dimensions.
//
// Returns 1-D with size the feature dimension of `out_backprop`.
func BiasAddGrad(scope *Scope, out_backprop tf.Output, optional ...BiasAddGradAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "BiasAddGrad",
		Input: []tf.Input{
			out_backprop,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Reduces `input` from `num_devices` using `reduction` to a single device.
//
// The graph should be constructed so that all inputs have a valid device
// assignment, and the op itself is assigned one of these devices.
//
// input: The input to the reduction.
// data: the value of the reduction across all `num_devices` devices.
// reduction: the reduction operation to perform.
func NcclReduce(scope *Scope, input []tf.Output, reduction string) (data tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"reduction": reduction}
	opspec := tf.OpSpec{
		Type: "NcclReduce",
		Input: []tf.Input{
			tf.OutputList(input),
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Computes the gradient of morphological 2-D dilation with respect to the input.
//
// Arguments:
//	input: 4-D with shape `[batch, in_height, in_width, depth]`.
//	filter: 3-D with shape `[filter_height, filter_width, depth]`.
//	out_backprop: 4-D with shape `[batch, out_height, out_width, depth]`.
//	strides: 1-D of length 4. The stride of the sliding window for each dimension of
// the input tensor. Must be: `[1, stride_height, stride_width, 1]`.
//	rates: 1-D of length 4. The input stride for atrous morphological dilation.
// Must be: `[1, rate_height, rate_width, 1]`.
//	padding: The type of padding algorithm to use.
//
// Returns 4-D with shape `[batch, in_height, in_width, depth]`.
func Dilation2DBackpropInput(scope *Scope, input tf.Output, filter tf.Output, out_backprop tf.Output, strides []int64, rates []int64, padding string) (in_backprop tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"strides": strides, "rates": rates, "padding": padding}
	opspec := tf.OpSpec{
		Type: "Dilation2DBackpropInput",
		Input: []tf.Input{
			input, filter, out_backprop,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// An Op to sum inputs across replicated TPU instances.
//
// Each instance supplies its own input.
//
// For example, suppose there are 8 TPU instances: `[A, B, C, D, E, F, G, H]`.
// Passing group_assignment=`[[0,2,4,6],[1,3,5,7]]` sets `A, C, E, G` as group 0,
// and `B, D, F, H` as group 1. Thus we get the outputs:
// `[A+C+E+G, B+D+F+H, A+C+E+G, B+D+F+H, A+C+E+G, B+D+F+H, A+C+E+G, B+D+F+H]`.
//
// Arguments:
//	input: The local input to the sum.
//	group_assignment: An int32 tensor with shape
// [num_groups, num_replicas_per_group]. `group_assignment[i]` represents the
// replica ids in the ith subgroup.
//
// Returns The sum of all the distributed inputs.
func CrossReplicaSum(scope *Scope, input tf.Output, group_assignment tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "CrossReplicaSum",
		Input: []tf.Input{
			input, group_assignment,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
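
// Example (illustrative sketch): the group_assignment layout described above,
// expressed as a constant. `replicaInput` stands for the per-replica tensor;
// the names here are made-up assumptions.
//
//	s := op.NewScope()
//	// Two groups over 8 replicas: evens sum together, odds sum together.
//	groups := op.Const(s, [][]int32{{0, 2, 4, 6}, {1, 3, 5, 7}})
//	summed := op.CrossReplicaSum(s, replicaInput, groups)
//	_ = summed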

// ResourceSparseApplyMomentumAttr is an optional argument to ResourceSparseApplyMomentum.
type ResourceSparseApplyMomentumAttr func(optionalAttr)

// ResourceSparseApplyMomentumUseLocking sets the optional use_locking attribute to value.
//
// value: If `True`, updating of the var and accum tensors will be protected
// by a lock; otherwise the behavior is undefined, but may exhibit less
// contention.
// If not specified, defaults to false
func ResourceSparseApplyMomentumUseLocking(value bool) ResourceSparseApplyMomentumAttr {
	return func(m optionalAttr) {
		m["use_locking"] = value
	}
}

// ResourceSparseApplyMomentumUseNesterov sets the optional use_nesterov attribute to value.
//
// value: If `True`, the tensor passed to compute grad will be
// var - lr * momentum * accum, so in the end, the var you get is actually
// var - lr * momentum * accum.
// If not specified, defaults to false
func ResourceSparseApplyMomentumUseNesterov(value bool) ResourceSparseApplyMomentumAttr {
	return func(m optionalAttr) {
		m["use_nesterov"] = value
	}
}

// Update relevant entries in '*var' and '*accum' according to the momentum scheme.
//
// Set use_nesterov = True if you want to use Nesterov momentum.
//
// That is, for the rows for which we have grad, var and accum are updated as follows:
//
// accum = accum * momentum + grad
// var -= lr * accum
//
// Arguments:
//	var_: Should be from a Variable().
//	accum: Should be from a Variable().
//	lr: Learning rate. Must be a scalar.
//	grad: The gradient.
//	indices: A vector of indices into the first dimension of var and accum.
//	momentum: Momentum. Must be a scalar.
//
// Returns the created operation.
func ResourceSparseApplyMomentum(scope *Scope, var_ tf.Output, accum tf.Output, lr tf.Output, grad tf.Output, indices tf.Output, momentum tf.Output, optional ...ResourceSparseApplyMomentumAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ResourceSparseApplyMomentum",
		Input: []tf.Input{
			var_, accum, lr, grad, indices, momentum,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}
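
// Worked illustration of the momentum update above (an editorial note, not
// generated documentation): with momentum 0.9, lr 0.1, accum 1.0 and grad 0.5
// for one updated row,
//
//	accum := 1.0*0.9 + 0.5   // 1.4
//	varDelta := -0.1 * accum // var decreases by 0.14
//	_ = varDelta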

// An Op to permute tensors across replicated TPU instances.
//
// Each instance supplies its own input.
//
// For example, suppose there are 4 TPU instances: `[A, B, C, D]`. Passing
// source_target_pairs=`[[0,1],[1,2],[2,3],[3,0]]` gets the outputs:
// `[D, A, B, C]`.
//
// Arguments:
//	input: The local input to be permuted. Currently only supports float and
// bfloat16.
//	source_target_pairs: A tensor with shape [num_pairs, 2].
//
// Returns The permuted input.
func CollectivePermute(scope *Scope, input tf.Output, source_target_pairs tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "CollectivePermute",
		Input: []tf.Input{
			input, source_target_pairs,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Returns the complex conjugate of a complex number.
//
// Given a tensor `input` of complex numbers, this operation returns a tensor of
// complex numbers that are the complex conjugate of each element in `input`. The
// complex numbers in `input` must be of the form \\(a + bj\\), where *a* is the
// real part and *b* is the imaginary part.
//
// The complex conjugate returned by this operation is of the form \\(a - bj\\).
//
// For example:
//
// ```
// # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
// tf.conj(input) ==> [-2.25 - 4.75j, 3.25 - 5.75j]
// ```
func Conj(scope *Scope, input tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Conj",
		Input: []tf.Input{
			input,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// RetrieveTPUEmbeddingCenteredRMSPropParametersAttr is an optional argument to RetrieveTPUEmbeddingCenteredRMSPropParameters.
type RetrieveTPUEmbeddingCenteredRMSPropParametersAttr func(optionalAttr)

// RetrieveTPUEmbeddingCenteredRMSPropParametersTableId sets the optional table_id attribute to value.
// If not specified, defaults to -1
//
// REQUIRES: value >= -1
func RetrieveTPUEmbeddingCenteredRMSPropParametersTableId(value int64) RetrieveTPUEmbeddingCenteredRMSPropParametersAttr {
	return func(m optionalAttr) {
		m["table_id"] = value
	}
}

// RetrieveTPUEmbeddingCenteredRMSPropParametersTableName sets the optional table_name attribute to value.
// If not specified, defaults to ""
func RetrieveTPUEmbeddingCenteredRMSPropParametersTableName(value string) RetrieveTPUEmbeddingCenteredRMSPropParametersAttr {
	return func(m optionalAttr) {
		m["table_name"] = value
	}
}

// Retrieve centered RMSProp embedding parameters.
//
// An op that retrieves optimization parameters from embedding to host
// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
// the correct embedding table configuration. For example, this op is
// used to retrieve updated parameters before saving a checkpoint.
//
// Returns Parameter parameters, ms, mom and mg, each updated by the centered
// RMSProp optimization algorithm.
func RetrieveTPUEmbeddingCenteredRMSPropParameters(scope *Scope, num_shards int64, shard_id int64, optional ...RetrieveTPUEmbeddingCenteredRMSPropParametersAttr) (parameters tf.Output, ms tf.Output, mom tf.Output, mg tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "RetrieveTPUEmbeddingCenteredRMSPropParameters",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2), op.Output(3)
}

// StringSplitAttr is an optional argument to StringSplit.
type StringSplitAttr func(optionalAttr)

// StringSplitSkipEmpty sets the optional skip_empty attribute to value.
//
// value: A `bool`. If `True`, skip the empty strings from the result.
// If not specified, defaults to true
func StringSplitSkipEmpty(value bool) StringSplitAttr {
	return func(m optionalAttr) {
		m["skip_empty"] = value
	}
}

// Split elements of `input` based on `delimiter` into a `SparseTensor`.
//
// Let N be the size of source (typically N will be the batch size). Split each
// element of `input` based on `delimiter` and return a `SparseTensor`
// containing the split tokens. Empty tokens are ignored.
//
// `delimiter` can be empty, or a string of split characters. If `delimiter` is an
//  empty string, each element of `input` is split into individual single-byte
//  character strings, including splitting of UTF-8 multibyte sequences. Otherwise
//  every character of `delimiter` is a potential split point.
//
// For example:
//   if N = 2, input[0] is 'hello world' and input[1] is 'a b c', then the output
//   will be
//
//   indices = [0, 0;
//              0, 1;
//              1, 0;
//              1, 1;
//              1, 2]
//   shape = [2, 3]
//   values = ['hello', 'world', 'a', 'b', 'c']
//
// Arguments:
//	input: 1-D. Strings to split.
//	delimiter: 0-D. Delimiter characters (bytes), or empty string.
//
// Returns A dense matrix of int64 representing the indices of the sparse tensor, a
// vector of strings corresponding to the split values, and a length-2 vector of
// int64 representing the shape of the sparse tensor, where the first value is N
// and the second value is the maximum number of tokens in a single input entry.
func StringSplit(scope *Scope, input tf.Output, delimiter tf.Output, optional ...StringSplitAttr) (indices tf.Output, values tf.Output, shape tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "StringSplit",
		Input: []tf.Input{
			input, delimiter,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}
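
// Example (illustrative sketch): splitting a batch of strings into a sparse
// representation. Assumes the Const helper from this package; inputs made up.
//
//	s := op.NewScope()
//	lines := op.Const(s, []string{"hello world", "a b c"})
//	delim := op.Const(s, " ")
//	indices, values, shape := op.StringSplit(s, lines, delim)
//	_, _, _ = indices, values, shape // sparse form: shape is [2, 3] here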

// RetrieveTPUEmbeddingFTRLParametersAttr is an optional argument to RetrieveTPUEmbeddingFTRLParameters.
type RetrieveTPUEmbeddingFTRLParametersAttr func(optionalAttr)

// RetrieveTPUEmbeddingFTRLParametersTableId sets the optional table_id attribute to value.
// If not specified, defaults to -1
//
// REQUIRES: value >= -1
func RetrieveTPUEmbeddingFTRLParametersTableId(value int64) RetrieveTPUEmbeddingFTRLParametersAttr {
	return func(m optionalAttr) {
		m["table_id"] = value
	}
}

// RetrieveTPUEmbeddingFTRLParametersTableName sets the optional table_name attribute to value.
// If not specified, defaults to ""
func RetrieveTPUEmbeddingFTRLParametersTableName(value string) RetrieveTPUEmbeddingFTRLParametersAttr {
	return func(m optionalAttr) {
		m["table_name"] = value
	}
}

// Retrieve FTRL embedding parameters.
//
// An op that retrieves optimization parameters from embedding to host
// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
// the correct embedding table configuration. For example, this op is
// used to retrieve updated parameters before saving a checkpoint.
//
// Returns Parameter parameters, accumulators and linears, each updated by the
// FTRL optimization algorithm.
func RetrieveTPUEmbeddingFTRLParameters(scope *Scope, num_shards int64, shard_id int64, optional ...RetrieveTPUEmbeddingFTRLParametersAttr) (parameters tf.Output, accumulators tf.Output, linears tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "RetrieveTPUEmbeddingFTRLParameters",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}

// MaxPool3DAttr is an optional argument to MaxPool3D.
type MaxPool3DAttr func(optionalAttr)

// MaxPool3DDataFormat sets the optional data_format attribute to value.
//
// value: The data format of the input and output data. With the
// default format "NDHWC", the data is stored in the order of:
//     [batch, in_depth, in_height, in_width, in_channels].
// Alternatively, the format could be "NCDHW", the data storage order is:
//     [batch, in_channels, in_depth, in_height, in_width].
// If not specified, defaults to "NDHWC"
func MaxPool3DDataFormat(value string) MaxPool3DAttr {
	return func(m optionalAttr) {
		m["data_format"] = value
	}
}

// Performs 3D max pooling on the input.
//
// Arguments:
//	input: Shape `[batch, depth, rows, cols, channels]` tensor to pool over.
//	ksize: 1-D tensor of length 5. The size of the window for each dimension of
// the input tensor. Must have `ksize[0] = ksize[4] = 1`.
//	strides: 1-D tensor of length 5. The stride of the sliding window for each
// dimension of `input`. Must have `strides[0] = strides[4] = 1`.
//	padding: The type of padding algorithm to use.
//
// Returns The max pooled output tensor.
func MaxPool3D(scope *Scope, input tf.Output, ksize []int64, strides []int64, padding string, optional ...MaxPool3DAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "MaxPool3D",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Convert JSON-encoded Example records to binary protocol buffer strings.
//
// This op translates a tensor containing Example records, encoded using
// the [standard JSON
// mapping](https://developers.google.com/protocol-buffers/docs/proto3#json),
// into a tensor containing the same records encoded as binary protocol
// buffers. The resulting tensor can then be fed to any of the other
// Example-parsing ops.
//
// Arguments:
//	json_examples: Each string is a JSON object serialized according to the JSON
// mapping of the Example proto.
//
// Returns Each string is a binary Example protocol buffer corresponding
// to the respective element of `json_examples`.
func DecodeJSONExample(scope *Scope, json_examples tf.Output) (binary_examples tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "DecodeJSONExample",
		Input: []tf.Input{
			json_examples,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// QueueEnqueueManyV2Attr is an optional argument to QueueEnqueueManyV2.
type QueueEnqueueManyV2Attr func(optionalAttr)

// QueueEnqueueManyV2TimeoutMs sets the optional timeout_ms attribute to value.
//
// value: If the queue is too full, this operation will block for up
// to timeout_ms milliseconds.
// Note: This option is not supported yet.
// If not specified, defaults to -1
func QueueEnqueueManyV2TimeoutMs(value int64) QueueEnqueueManyV2Attr {
	return func(m optionalAttr) {
		m["timeout_ms"] = value
	}
}

// Enqueues zero or more tuples of one or more tensors in the given queue.
//
// This operation slices each component tensor along the 0th dimension to
// make multiple queue elements. All of the tuple components must have the
// same size in the 0th dimension.
//
// The components input has k elements, which correspond to the components of
// tuples stored in the given queue.
//
// N.B. If the queue is full, this operation will block until the given
// elements have been enqueued (or 'timeout_ms' elapses, if specified).
//
// Arguments:
//	handle: The handle to a queue.
//	components: One or more tensors from which the enqueued tensors should
// be taken.
//
// Returns the created operation.
func QueueEnqueueManyV2(scope *Scope, handle tf.Output, components []tf.Output, optional ...QueueEnqueueManyV2Attr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "QueueEnqueueManyV2",
		Input: []tf.Input{
			handle, tf.OutputList(components),
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}

// PrintV2Attr is an optional argument to PrintV2.
type PrintV2Attr func(optionalAttr)

// PrintV2OutputStream sets the optional output_stream attribute to value.
//
// value: A string specifying the output stream or logging level to print to.
// If not specified, defaults to "stderr"
func PrintV2OutputStream(value string) PrintV2Attr {
	return func(m optionalAttr) {
		m["output_stream"] = value
	}
}

// Prints a string scalar.
//
// Prints a string scalar to the desired output_stream.
//
// Arguments:
//	input: The string scalar to print.
//
// Returns the created operation.
func PrintV2(scope *Scope, input tf.Output, optional ...PrintV2Attr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "PrintV2",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}
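
// Example (illustrative sketch): printing a scalar string to stdout instead of
// the default stderr. Assumes the Const helper from this package.
//
//	s := op.NewScope()
//	msg := op.Const(s, "training step complete")
//	print := op.PrintV2(s, msg, op.PrintV2OutputStream("stdout"))
//	_ = print // run the returned *tf.Operation to emit the message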

// The gradient operator for the SparseSlice op.
//
// This op takes in the upstream gradient w.r.t. non-empty values of
// the sliced `SparseTensor`, and outputs the gradients w.r.t.
// the non-empty values of input `SparseTensor`.
//
// Arguments:
//	backprop_val_grad: 1-D. The gradient with respect to
// the non-empty values of the sliced `SparseTensor`.
//	input_indices: 2-D.  The `indices` of the input `SparseTensor`.
//	input_start: 1-D. Tensor representing the start of the slice.
//	output_indices: 2-D.  The `indices` of the sliced `SparseTensor`.
//
// Returns 1-D. The gradient with respect to the non-empty values of input `SparseTensor`.
func SparseSliceGrad(scope *Scope, backprop_val_grad tf.Output, input_indices tf.Output, input_start tf.Output, output_indices tf.Output) (val_grad tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "SparseSliceGrad",
		Input: []tf.Input{
			backprop_val_grad, input_indices, input_start, output_indices,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// ResourceApplyProximalAdagradAttr is an optional argument to ResourceApplyProximalAdagrad.
type ResourceApplyProximalAdagradAttr func(optionalAttr)

// ResourceApplyProximalAdagradUseLocking sets the optional use_locking attribute to value.
//
// value: If True, updating of the var and accum tensors will be protected by
// a lock; otherwise the behavior is undefined, but may exhibit less contention.
// If not specified, defaults to false
func ResourceApplyProximalAdagradUseLocking(value bool) ResourceApplyProximalAdagradAttr {
	return func(m optionalAttr) {
		m["use_locking"] = value
	}
}

// Update '*var' and '*accum' according to FOBOS with Adagrad learning rate.
//
// accum += grad * grad
// prox_v = var - lr * grad * (1 / sqrt(accum))
// var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0}
//
// Arguments:
//	var_: Should be from a Variable().
//	accum: Should be from a Variable().
//	lr: Scaling factor. Must be a scalar.
//	l1: L1 regularization. Must be a scalar.
//	l2: L2 regularization. Must be a scalar.
//	grad: The gradient.
//
// Returns the created operation.
func ResourceApplyProximalAdagrad(scope *Scope, var_ tf.Output, accum tf.Output, lr tf.Output, l1 tf.Output, l2 tf.Output, grad tf.Output, optional ...ResourceApplyProximalAdagradAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ResourceApplyProximalAdagrad",
		Input: []tf.Input{
			var_, accum, lr, l1, l2, grad,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}

// MutableHashTableOfTensorsV2Attr is an optional argument to MutableHashTableOfTensorsV2.
type MutableHashTableOfTensorsV2Attr func(optionalAttr)

// MutableHashTableOfTensorsV2Container sets the optional container attribute to value.
//
// value: If non-empty, this table is placed in the given container.
// Otherwise, a default container is used.
// If not specified, defaults to ""
func MutableHashTableOfTensorsV2Container(value string) MutableHashTableOfTensorsV2Attr {
	return func(m optionalAttr) {
		m["container"] = value
	}
}

// MutableHashTableOfTensorsV2SharedName sets the optional shared_name attribute to value.
//
// value: If non-empty, this table is shared under the given name across
// multiple sessions.
// If not specified, defaults to ""
func MutableHashTableOfTensorsV2SharedName(value string) MutableHashTableOfTensorsV2Attr {
	return func(m optionalAttr) {
		m["shared_name"] = value
	}
}

// MutableHashTableOfTensorsV2UseNodeNameSharing sets the optional use_node_name_sharing attribute to value.
// If not specified, defaults to false
func MutableHashTableOfTensorsV2UseNodeNameSharing(value bool) MutableHashTableOfTensorsV2Attr {
	return func(m optionalAttr) {
		m["use_node_name_sharing"] = value
	}
}

// MutableHashTableOfTensorsV2ValueShape sets the optional value_shape attribute to value.
// If not specified, defaults to <>
func MutableHashTableOfTensorsV2ValueShape(value tf.Shape) MutableHashTableOfTensorsV2Attr {
	return func(m optionalAttr) {
		m["value_shape"] = value
	}
}

// Creates an empty hash table.
//
// This op creates a mutable hash table, specifying the type of its keys and
// values. Each value must be a vector. Data can be inserted into the table using
// the insert operations. It does not support the initialization operation.
//
// Arguments:
//	key_dtype: Type of the table keys.
//	value_dtype: Type of the table values.
//
// Returns Handle to a table.
func MutableHashTableOfTensorsV2(scope *Scope, key_dtype tf.DataType, value_dtype tf.DataType, optional ...MutableHashTableOfTensorsV2Attr) (table_handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"key_dtype": key_dtype, "value_dtype": value_dtype}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "MutableHashTableOfTensorsV2",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// ResourceApplyProximalGradientDescentAttr is an optional argument to ResourceApplyProximalGradientDescent.
type ResourceApplyProximalGradientDescentAttr func(optionalAttr)

// ResourceApplyProximalGradientDescentUseLocking sets the optional use_locking attribute to value.
//
// value: If True, the subtraction will be protected by a lock;
// otherwise the behavior is undefined, but may exhibit less contention.
// If not specified, defaults to false
func ResourceApplyProximalGradientDescentUseLocking(value bool) ResourceApplyProximalGradientDescentAttr {
	return func(m optionalAttr) {
		m["use_locking"] = value
	}
}

// Update '*var' as FOBOS algorithm with fixed learning rate.
//
// prox_v = var - alpha * delta
// var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0}
//
// Arguments:
//	var_: Should be from a Variable().
//	alpha: Scaling factor. Must be a scalar.
//	l1: L1 regularization. Must be a scalar.
//	l2: L2 regularization. Must be a scalar.
//	delta: The change.
//
// Returns the created operation.
func ResourceApplyProximalGradientDescent(scope *Scope, var_ tf.Output, alpha tf.Output, l1 tf.Output, l2 tf.Output, delta tf.Output, optional ...ResourceApplyProximalGradientDescentAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ResourceApplyProximalGradientDescent",
		Input: []tf.Input{
			var_, alpha, l1, l2, delta,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}

// Returns 0 if the denominator is zero.
//
// *NOTE*: `DivNoNan` supports broadcasting. More about broadcasting
// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
func DivNoNan(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "DivNoNan",
		Input: []tf.Input{
			x, y,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
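
// Example (illustrative sketch): safe elementwise division. Assumes the Const
// helper from this package; values are made up.
//
//	s := op.NewScope()
//	x := op.Const(s, []float32{1, 2, 3})
//	y := op.Const(s, []float32{2, 0, 4})
//	z := op.DivNoNan(s, x, y) // yields [0.5, 0, 0.75]: the 2/0 entry becomes 0
//	_ = z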

// Subtracts a value from the current value of a variable.
//
// Any ReadVariableOp with a control dependency on this op is guaranteed to
// see the decremented value or a subsequent newer one.
//
// Arguments:
//	resource: handle to the resource in which to store the variable.
//	value: the value by which the variable will be decremented.
//
// Returns the created operation.
func AssignSubVariableOp(scope *Scope, resource tf.Output, value tf.Output) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "AssignSubVariableOp",
		Input: []tf.Input{
			resource, value,
		},
	}
	return scope.AddOperation(opspec)
}

// RestoreAttr is an optional argument to Restore.
type RestoreAttr func(optionalAttr)

// RestorePreferredShard sets the optional preferred_shard attribute to value.
//
// value: Index of file to open first if multiple files match
// `file_pattern`.
// If not specified, defaults to -1
func RestorePreferredShard(value int64) RestoreAttr {
	return func(m optionalAttr) {
		m["preferred_shard"] = value
	}
}

// Restores a tensor from checkpoint files.
//
// Reads a tensor stored in one or several files. If there are several files (for
// instance because a tensor was saved as slices), `file_pattern` may contain
// wildcard symbols (`*` and `?`) in the filename portion only, not in the
// directory portion.
//
// If a `file_pattern` matches several files, `preferred_shard` can be used to hint
// in which file the requested tensor is likely to be found. This op will first
// open the file at index `preferred_shard` in the list of matching files and try
// to restore tensors from that file.  Only if some tensors or tensor slices are
// not found in that first file, then the Op opens all the files. Setting
// `preferred_shard` to match the value passed as the `shard` input
// of a matching `Save` Op may speed up Restore.  This attribute only affects
// performance, not correctness.  The default value -1 means files are processed in
// order.
//
// See also `RestoreSlice`.
//
// Arguments:
//	file_pattern: Must have a single element. The pattern of the files from
// which we read the tensor.
//	tensor_name: Must have a single element. The name of the tensor to be
// restored.
//	dt: The type of the tensor to be restored.
//
// Returns The restored tensor.
func Restore(scope *Scope, file_pattern tf.Output, tensor_name tf.Output, dt tf.DataType, optional ...RestoreAttr) (tensor tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"dt": dt}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "Restore",
		Input: []tf.Input{
			file_pattern, tensor_name,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// QuantizedResizeBilinearAttr is an optional argument to QuantizedResizeBilinear.
type QuantizedResizeBilinearAttr func(optionalAttr)

// QuantizedResizeBilinearAlignCorners sets the optional align_corners attribute to value.
//
// value: If true, the centers of the 4 corner pixels of the input and output tensors are
// aligned, preserving the values at the corner pixels. Defaults to false.
// If not specified, defaults to false
func QuantizedResizeBilinearAlignCorners(value bool) QuantizedResizeBilinearAttr {
	return func(m optionalAttr) {
		m["align_corners"] = value
	}
}

// QuantizedResizeBilinearHalfPixelCenters sets the optional half_pixel_centers attribute to value.
// If not specified, defaults to false
func QuantizedResizeBilinearHalfPixelCenters(value bool) QuantizedResizeBilinearAttr {
	return func(m optionalAttr) {
		m["half_pixel_centers"] = value
	}
}

// Resize quantized `images` to `size` using quantized bilinear interpolation.
//
// Input images and output images must be quantized types.
//
// Arguments:
//	images: 4-D with shape `[batch, height, width, channels]`.
//	size: A 1-D int32 Tensor of 2 elements: `new_height, new_width`.  The
// new size for the images.
//
//
//
// Returns 4-D with shape
// `[batch, new_height, new_width, channels]`.
func QuantizedResizeBilinear(scope *Scope, images tf.Output, size tf.Output, min tf.Output, max tf.Output, optional ...QuantizedResizeBilinearAttr) (resized_images tf.Output, out_min tf.Output, out_max tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "QuantizedResizeBilinear",
		Input: []tf.Input{
			images, size, min, max,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}

// Creates a dataset that uses a custom thread pool to compute `input_dataset`.
//
// Arguments:
//
//	num_threads: Identifies the number of threads to use for the private threadpool.
//
//
func ExperimentalPrivateThreadPoolDataset(scope *Scope, input_dataset tf.Output, num_threads tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
	opspec := tf.OpSpec{
		Type: "ExperimentalPrivateThreadPoolDataset",
		Input: []tf.Input{
			input_dataset, num_threads,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// StackV2Attr is an optional argument to StackV2.
type StackV2Attr func(optionalAttr)

// StackV2StackName sets the optional stack_name attribute to value.
//
// value: Overrides the name used for the temporary stack resource. Default
// value is the name of the 'Stack' op (which is guaranteed unique).
// If not specified, defaults to ""
func StackV2StackName(value string) StackV2Attr {
	return func(m optionalAttr) {
		m["stack_name"] = value
	}
}

// A stack that produces elements in first-in last-out order.
//
// Arguments:
//	max_size: The maximum size of the stack if non-negative. If negative, the stack
// size is unlimited.
//	elem_type: The type of the elements on the stack.
//
// Returns The handle to the stack.
func StackV2(scope *Scope, max_size tf.Output, elem_type tf.DataType, optional ...StackV2Attr) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"elem_type": elem_type}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "StackV2",
		Input: []tf.Input{
			max_size,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// CudnnRNNBackpropAttr is an optional argument to CudnnRNNBackprop.
type CudnnRNNBackpropAttr func(optionalAttr)

// CudnnRNNBackpropRnnMode sets the optional rnn_mode attribute to value.
// If not specified, defaults to "lstm"
func CudnnRNNBackpropRnnMode(value string) CudnnRNNBackpropAttr {
	return func(m optionalAttr) {
		m["rnn_mode"] = value
	}
}

// CudnnRNNBackpropInputMode sets the optional input_mode attribute to value.
// If not specified, defaults to "linear_input"
func CudnnRNNBackpropInputMode(value string) CudnnRNNBackpropAttr {
	return func(m optionalAttr) {
		m["input_mode"] = value
	}
}

// CudnnRNNBackpropDirection sets the optional direction attribute to value.
// If not specified, defaults to "unidirectional"
func CudnnRNNBackpropDirection(value string) CudnnRNNBackpropAttr {
	return func(m optionalAttr) {
		m["direction"] = value
	}
}

// CudnnRNNBackpropDropout sets the optional dropout attribute to value.
// If not specified, defaults to 0
func CudnnRNNBackpropDropout(value float32) CudnnRNNBackpropAttr {
	return func(m optionalAttr) {
		m["dropout"] = value
	}
}

// CudnnRNNBackpropSeed sets the optional seed attribute to value.
// If not specified, defaults to 0
func CudnnRNNBackpropSeed(value int64) CudnnRNNBackpropAttr {
	return func(m optionalAttr) {
		m["seed"] = value
	}
}

// CudnnRNNBackpropSeed2 sets the optional seed2 attribute to value.
// If not specified, defaults to 0
func CudnnRNNBackpropSeed2(value int64) CudnnRNNBackpropAttr {
	return func(m optionalAttr) {
		m["seed2"] = value
	}
}

// Backprop step of CudnnRNN.
//
// Compute the backprop of both data and weights in an RNN.
//
// rnn_mode: Indicates the type of the RNN model.
// input_mode: Indicate whether there is a linear projection between the input and
//     the actual computation before the first layer. 'skip_input' is only allowed
//     when input_size == num_units; 'auto_select' implies 'skip_input' when
//     input_size == num_units; otherwise, it implies 'linear_input'.
// direction: Indicates whether a bidirectional model will be used. Should be
//   "unidirectional" or "bidirectional".
// dropout: Dropout probability. When set to 0., dropout is disabled.
// seed: The 1st part of a seed to initialize dropout.
// seed2: The 2nd part of a seed to initialize dropout.
// input: A 3-D tensor with the shape of [seq_length, batch_size, input_size].
// input_h: A 3-D tensor with the shape of [num_layer * dir, batch_size,
//     num_units].
// input_c: For LSTM, a 3-D tensor with the shape of
//     [num_layer * dir, batch, num_units]. For other models, it is ignored.
// params: A 1-D tensor that contains the weights and biases in an opaque layout.
//     The size must be created through CudnnRNNParamsSize, and initialized
//     separately. Note that they might not be compatible across different
//     generations, so it is a good idea to save and restore them.
// output: A 3-D tensor with the shape of [seq_length, batch_size,
//     dir * num_units].
// output_h: The same shape as input_h.
// output_c: The same shape as input_c for LSTM. An empty tensor for other models.
// output_backprop: A 3-D tensor with the same shape as output in the forward pass.
// output_h_backprop: A 3-D tensor with the same shape as output_h in the forward
//     pass.
// output_c_backprop: A 3-D tensor with the same shape as output_c in the forward
//     pass.
// reserve_space: The same reserve_space produced in the forward operation.
// input_backprop: The backprop to input in the forward pass. Has the same shape
//     as input.
// input_h_backprop: The backprop to input_h in the forward pass. Has the same
//     shape as input_h.
// input_c_backprop: The backprop to input_c in the forward pass. Has the same
//     shape as input_c.
// params_backprop: The backprop to the params buffer in the forward pass. Has the
//     same shape as params.
func CudnnRNNBackprop(scope *Scope, input tf.Output, input_h tf.Output, input_c tf.Output, params tf.Output, output tf.Output, output_h tf.Output, output_c tf.Output, output_backprop tf.Output, output_h_backprop tf.Output, output_c_backprop tf.Output, reserve_space tf.Output, optional ...CudnnRNNBackpropAttr) (input_backprop tf.Output, input_h_backprop tf.Output, input_c_backprop tf.Output, params_backprop tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "CudnnRNNBackprop",
		Input: []tf.Input{
			input, input_h, input_c, params, output, output_h, output_c, output_backprop, output_h_backprop, output_c_backprop, reserve_space,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2), op.Output(3)
}

// InfeedEnqueueAttr is an optional argument to InfeedEnqueue.
type InfeedEnqueueAttr func(optionalAttr)

// InfeedEnqueueShape sets the optional shape attribute to value.
//
// value: The shape of the tensor.
// If not specified, defaults to <>
func InfeedEnqueueShape(value tf.Shape) InfeedEnqueueAttr {
	return func(m optionalAttr) {
		m["shape"] = value
	}
}

// InfeedEnqueueLayout sets the optional layout attribute to value.
//
// value: A vector holding the requested layout in minor-to-major sequence.
// If a layout attribute is passed, but its values are all -1, the layout will
// be computed by the infeed operation.
// If not specified, defaults to <>
func InfeedEnqueueLayout(value []int64) InfeedEnqueueAttr {
	return func(m optionalAttr) {
		m["layout"] = value
	}
}

// InfeedEnqueueDeviceOrdinal sets the optional device_ordinal attribute to value.
//
// value: The TPU device to use. This should be -1 when the Op
// is running on a TPU device, and >= 0 when the Op is running on the CPU
// device.
// If not specified, defaults to -1
func InfeedEnqueueDeviceOrdinal(value int64) InfeedEnqueueAttr {
	return func(m optionalAttr) {
		m["device_ordinal"] = value
	}
}

// An op which feeds a single Tensor value into the computation.
//
// Arguments:
//	input: A tensor that will be provided using the infeed mechanism.
//
// Returns the created operation.
func InfeedEnqueue(scope *Scope, input tf.Output, optional ...InfeedEnqueueAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "InfeedEnqueue",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}

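// Illustrative usage sketch (not part of the generated API): the Attr types
// above follow the functional-options pattern, so callers pass zero or more
// option values after the required arguments. Assuming the op package's
// NewScope and Const helpers:
//
//	s := op.NewScope()
//	t := op.Const(s, []float32{1, 2, 3})
//	op.InfeedEnqueue(s, t, op.InfeedEnqueueDeviceOrdinal(0))
//
// Each option function simply records its value in the attrs map before the
// op is added to the graph.
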
// Computes softmax cross entropy cost and gradients to backpropagate.
//
// Inputs are the logits, not probabilities.
//
// Arguments:
//	features: batch_size x num_classes matrix
//	labels: batch_size x num_classes matrix
// The caller must ensure that each batch of labels represents a valid
// probability distribution.
//
// Returns Per-example loss (batch_size vector) and backpropagated gradients
// (batch_size x num_classes matrix).
func SoftmaxCrossEntropyWithLogits(scope *Scope, features tf.Output, labels tf.Output) (loss tf.Output, backprop tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "SoftmaxCrossEntropyWithLogits",
		Input: []tf.Input{
			features, labels,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1)
}

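// Illustrative sketch (shapes assumed; not part of the generated API): feeding
// logits and one-hot labels through the op, using op.NewScope and op.Const:
//
//	s := op.NewScope()
//	logits := op.Const(s, [][]float32{{2, 1, 0.1}})
//	labels := op.Const(s, [][]float32{{1, 0, 0}})
//	loss, backprop := op.SoftmaxCrossEntropyWithLogits(s, logits, labels)
//
// loss is a length-1 vector and backprop has the same shape as logits.
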
// ReduceJoinAttr is an optional argument to ReduceJoin.
type ReduceJoinAttr func(optionalAttr)

// ReduceJoinKeepDims sets the optional keep_dims attribute to value.
//
// value: If `True`, retain reduced dimensions with length `1`.
// If not specified, defaults to false
func ReduceJoinKeepDims(value bool) ReduceJoinAttr {
	return func(m optionalAttr) {
		m["keep_dims"] = value
	}
}

// ReduceJoinSeparator sets the optional separator attribute to value.
//
// value: The separator to use when joining.
// If not specified, defaults to ""
func ReduceJoinSeparator(value string) ReduceJoinAttr {
	return func(m optionalAttr) {
		m["separator"] = value
	}
}

// Joins a string Tensor across the given dimensions.
//
// Computes the string join across dimensions in the given string Tensor of shape
// `[d_0, d_1, ..., d_{n-1}]`.  Returns a new Tensor created by joining the input
// strings with the given separator (default: empty string).  Negative indices are
// counted backwards from the end, with `-1` being equivalent to `n - 1`.  If
// indices are not specified, joins across all dimensions beginning from `n - 1`
// through `0`.
//
// For example:
//
// ```python
// # tensor `a` is [["a", "b"], ["c", "d"]]
// tf.reduce_join(a, 0) ==> ["ac", "bd"]
// tf.reduce_join(a, 1) ==> ["ab", "cd"]
// tf.reduce_join(a, -2) = tf.reduce_join(a, 0) ==> ["ac", "bd"]
// tf.reduce_join(a, -1) = tf.reduce_join(a, 1) ==> ["ab", "cd"]
// tf.reduce_join(a, 0, keep_dims=True) ==> [["ac", "bd"]]
// tf.reduce_join(a, 1, keep_dims=True) ==> [["ab"], ["cd"]]
// tf.reduce_join(a, 0, separator=".") ==> ["a.c", "b.d"]
// tf.reduce_join(a, [0, 1]) ==> "acbd"
// tf.reduce_join(a, [1, 0]) ==> "abcd"
// tf.reduce_join(a, []) ==> [["a", "b"], ["c", "d"]]
// tf.reduce_join(a) = tf.reduce_join(a, [1, 0]) ==> "abcd"
// ```
//
// Arguments:
//	inputs: The input to be joined.  All reduced indices must have non-zero size.
//	reduction_indices: The dimensions to reduce over.  Dimensions are reduced in the
// order specified.  Omitting `reduction_indices` is equivalent to passing
// `[n-1, n-2, ..., 0]`.  Negative indices from `-n` to `-1` are supported.
//
// Returns Has shape equal to that of the input with reduced dimensions removed or
// set to `1` depending on `keep_dims`.
func ReduceJoin(scope *Scope, inputs tf.Output, reduction_indices tf.Output, optional ...ReduceJoinAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ReduceJoin",
		Input: []tf.Input{
			inputs, reduction_indices,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

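// Illustrative Go usage sketch (assumes the op package's NewScope and Const
// helpers; not part of the generated API):
//
//	s := op.NewScope()
//	a := op.Const(s, [][]string{{"a", "b"}, {"c", "d"}})
//	idx := op.Const(s, []int32{0})
//	joined := op.ReduceJoin(s, a, idx, op.ReduceJoinSeparator("."))
//
// joined evaluates to ["a.c", "b.d"], matching the Python example above.
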
// TopKAttr is an optional argument to TopK.
type TopKAttr func(optionalAttr)

// TopKSorted sets the optional sorted attribute to value.
//
// value: If true the resulting `k` elements will be sorted by the values in
// descending order.
// If not specified, defaults to true
func TopKSorted(value bool) TopKAttr {
	return func(m optionalAttr) {
		m["sorted"] = value
	}
}

// Finds values and indices of the `k` largest elements for the last dimension.
//
// DEPRECATED at GraphDef version 7: Use TopKV2 instead
//
// If the input is a vector (rank-1), finds the `k` largest entries in the vector
// and outputs their values and indices as vectors.  Thus `values[j]` is the
// `j`-th largest entry in `input`, and its index is `indices[j]`.
//
// For matrices (resp. higher rank input), computes the top `k` entries in each
// row (resp. vector along the last dimension).  Thus,
//
//     values.shape = indices.shape = input.shape[:-1] + [k]
//
// If two elements are equal, the lower-index element appears first.
//
// If `k` varies dynamically, use `TopKV2` below.
//
// Arguments:
//	input: 1-D or higher with last dimension at least `k`.
//	k: Number of top elements to look for along the last dimension (along each
// row for matrices).
//
// Returns The `k` largest elements along each last dimensional slice, and the
// indices of `values` within the last dimension of `input`.
func TopK(scope *Scope, input tf.Output, k int64, optional ...TopKAttr) (values tf.Output, indices tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"k": k}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "TopK",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1)
}

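// Illustrative Go usage sketch (assumes op.NewScope and op.Const; not part of
// the generated API):
//
//	s := op.NewScope()
//	x := op.Const(s, []float32{1, 8, 2, 4})
//	values, indices := op.TopK(s, x, 2)
//
// values evaluates to [8 4] and indices to [1 3].
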
// BatchToSpace for N-D tensors of type T.
//
// This operation reshapes the "batch" dimension 0 into `M + 1` dimensions of shape
// `block_shape + [batch]`, interleaves these blocks back into the grid defined by
// the spatial dimensions `[1, ..., M]`, to obtain a result with the same rank as
// the input.  The spatial dimensions of this intermediate result are then
// optionally cropped according to `crops` to produce the output.  This is the
// reverse of SpaceToBatch.  See below for a precise description.
//
// Arguments:
//	input: N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`,
// where spatial_shape has M dimensions.
//	block_shape: 1-D with shape `[M]`, all values must be >= 1.
//	crops: 2-D with shape `[M, 2]`, all values must be >= 0.
//   `crops[i] = [crop_start, crop_end]` specifies the amount to crop from input
//   dimension `i + 1`, which corresponds to spatial dimension `i`.  It is
//   required that
//   `crop_start[i] + crop_end[i] <= block_shape[i] * input_shape[i + 1]`.
//
// This operation is equivalent to the following steps:
//
// 1. Reshape `input` to `reshaped` of shape:
//      [block_shape[0], ..., block_shape[M-1],
//       batch / prod(block_shape),
//       input_shape[1], ..., input_shape[N-1]]
//
// 2. Permute dimensions of `reshaped` to produce `permuted` of shape
//      [batch / prod(block_shape),
//
//       input_shape[1], block_shape[0],
//       ...,
//       input_shape[M], block_shape[M-1],
//
//       input_shape[M+1], ..., input_shape[N-1]]
//
// 3. Reshape `permuted` to produce `reshaped_permuted` of shape
//      [batch / prod(block_shape),
//
//       input_shape[1] * block_shape[0],
//       ...,
//       input_shape[M] * block_shape[M-1],
//
//       input_shape[M+1],
//       ...,
//       input_shape[N-1]]
//
// 4. Crop the start and end of dimensions `[1, ..., M]` of
//    `reshaped_permuted` according to `crops` to produce the output of shape:
//      [batch / prod(block_shape),
//
//       input_shape[1] * block_shape[0] - crops[0,0] - crops[0,1],
//       ...,
//       input_shape[M] * block_shape[M-1] - crops[M-1,0] - crops[M-1,1],
//
//       input_shape[M+1], ..., input_shape[N-1]]
//
// Some examples:
//
// (1) For the following input of shape `[4, 1, 1, 1]`, `block_shape = [2, 2]`, and
//     `crops = [[0, 0], [0, 0]]`:
//
// ```
// [[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
// ```
//
// The output tensor has shape `[1, 2, 2, 1]` and value:
//
// ```
// x = [[[[1], [2]], [[3], [4]]]]
// ```
//
// (2) For the following input of shape `[4, 1, 1, 3]`, `block_shape = [2, 2]`, and
//     `crops = [[0, 0], [0, 0]]`:
//
// ```
// [[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]]
// ```
//
// The output tensor has shape `[1, 2, 2, 3]` and value:
//
// ```
// x = [[[[1, 2, 3], [4, 5, 6]],
//       [[7, 8, 9], [10, 11, 12]]]]
// ```
//
// (3) For the following input of shape `[4, 2, 2, 1]`, `block_shape = [2, 2]`, and
//     `crops = [[0, 0], [0, 0]]`:
//
// ```
// x = [[[[1], [3]], [[9], [11]]],
//      [[[2], [4]], [[10], [12]]],
//      [[[5], [7]], [[13], [15]]],
//      [[[6], [8]], [[14], [16]]]]
// ```
//
// The output tensor has shape `[1, 4, 4, 1]` and value:
//
// ```
// x = [[[1],   [2],  [3],  [4]],
//      [[5],   [6],  [7],  [8]],
//      [[9],  [10], [11],  [12]],
//      [[13], [14], [15],  [16]]]
// ```
//
// (4) For the following input of shape `[8, 1, 3, 1]`, `block_shape = [2, 2]`, and
//     `crops = [[0, 0], [2, 0]]`:
//
// ```
// x = [[[[0], [1], [3]]], [[[0], [9], [11]]],
//      [[[0], [2], [4]]], [[[0], [10], [12]]],
//      [[[0], [5], [7]]], [[[0], [13], [15]]],
//      [[[0], [6], [8]]], [[[0], [14], [16]]]]
// ```
//
// The output tensor has shape `[2, 2, 4, 1]` and value:
//
// ```
// x = [[[[1],   [2],  [3],  [4]],
//       [[5],   [6],  [7],  [8]]],
//      [[[9],  [10], [11],  [12]],
//       [[13], [14], [15],  [16]]]]
// ```
func BatchToSpaceND(scope *Scope, input tf.Output, block_shape tf.Output, crops tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "BatchToSpaceND",
		Input: []tf.Input{
			input, block_shape, crops,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

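// Illustrative Go sketch of example (1) above (assumes op.NewScope and
// op.Const; not part of the generated API):
//
//	s := op.NewScope()
//	x := op.Const(s, [][][][]int32{{{{1}}}, {{{2}}}, {{{3}}}, {{{4}}}})
//	blockShape := op.Const(s, []int32{2, 2})
//	crops := op.Const(s, [][]int32{{0, 0}, {0, 0}})
//	y := op.BatchToSpaceND(s, x, blockShape, crops) // shape [1, 2, 2, 1]
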
// UnpackAttr is an optional argument to Unpack.
type UnpackAttr func(optionalAttr)

// UnpackAxis sets the optional axis attribute to value.
//
// value: Dimension along which to unpack.  Negative values wrap around, so the
// valid range is `[-R, R)`.
// If not specified, defaults to 0
func UnpackAxis(value int64) UnpackAttr {
	return func(m optionalAttr) {
		m["axis"] = value
	}
}

// Unpacks a given dimension of a rank-`R` tensor into `num` rank-`(R-1)` tensors.
//
// Unpacks `num` tensors from `value` by chipping it along the `axis` dimension.
// For example, given a tensor of shape `(A, B, C, D)`;
//
// If `axis == 0` then the i'th tensor in `output` is the slice `value[i, :, :, :]`
//   and each tensor in `output` will have shape `(B, C, D)`. (Note that the
//   dimension unpacked along is gone, unlike `split`).
//
// If `axis == 1` then the i'th tensor in `output` is the slice `value[:, i, :, :]`
//   and each tensor in `output` will have shape `(A, C, D)`.
// Etc.
//
// This is the opposite of `pack`.
//
// Arguments:
//	value: 1-D or higher, with `axis` dimension size equal to `num`.
//
//
// Returns The list of tensors unpacked from `value`.
func Unpack(scope *Scope, value tf.Output, num int64, optional ...UnpackAttr) (output []tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num": num}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "Unpack",
		Input: []tf.Input{
			value,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	if scope.Err() != nil {
		return
	}
	var idx int
	var err error
	if output, idx, err = makeOutputList(op, idx, "output"); err != nil {
		scope.UpdateErr("Unpack", err)
		return
	}
	return output
}

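// Illustrative Go usage sketch (assumes op.NewScope and op.Const; not part of
// the generated API):
//
//	s := op.NewScope()
//	m := op.Const(s, [][]float32{{1, 2}, {3, 4}, {5, 6}})
//	rows := op.Unpack(s, m, 3)                   // three tensors of shape [2]
//	cols := op.Unpack(s, m, 2, op.UnpackAxis(1)) // two tensors of shape [3]
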
// Delete the stack from its resource container.
//
// Arguments:
//	handle: The handle to a stack.
//
// Returns the created operation.
func StackCloseV2(scope *Scope, handle tf.Output) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "StackCloseV2",
		Input: []tf.Input{
			handle,
		},
	}
	return scope.AddOperation(opspec)
}

// Increments variable pointed to by 'resource' until it reaches 'limit'.
//
// Arguments:
//	resource: Should be from a scalar `Variable` node.
//	limit: If incrementing ref would bring it above limit, instead generates an
// 'OutOfRange' error.
//
//
// Returns A copy of the input before increment. If nothing else modifies the
// input, the values produced will all be distinct.
func ResourceCountUpTo(scope *Scope, resource tf.Output, limit int64, T tf.DataType) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"limit": limit, "T": T}
	opspec := tf.OpSpec{
		Type: "ResourceCountUpTo",
		Input: []tf.Input{
			resource,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Computes softsign gradients for a softsign operation.
//
// Arguments:
//	gradients: The backpropagated gradients to the corresponding softsign operation.
//	features: The features passed as input to the corresponding softsign operation.
//
// Returns The gradients: `gradients / (1 + abs(features)) ** 2`.
func SoftsignGrad(scope *Scope, gradients tf.Output, features tf.Output) (backprops tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "SoftsignGrad",
		Input: []tf.Input{
			gradients, features,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Provides the time since epoch in seconds.
//
// Returns the timestamp as a `float64` for seconds since the Unix epoch.
//
// Note: the timestamp is computed when the op is executed, not when it is added
// to the graph.
func Timestamp(scope *Scope) (ts tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Timestamp",
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Returns immutable tensor from memory region.
//
// The current implementation memmaps the tensor from a file.
//
// Arguments:
//	dtype: Type of the returned tensor.
//	shape: Shape of the returned tensor.
//	memory_region_name: Name of readonly memory region used by the tensor, see
// NewReadOnlyMemoryRegionFromFile in tensorflow::Env.
func ImmutableConst(scope *Scope, dtype tf.DataType, shape tf.Shape, memory_region_name string) (tensor tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"dtype": dtype, "shape": shape, "memory_region_name": memory_region_name}
	opspec := tf.OpSpec{
		Type: "ImmutableConst",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// StringJoinAttr is an optional argument to StringJoin.
type StringJoinAttr func(optionalAttr)

// StringJoinSeparator sets the optional separator attribute to value.
//
// value: string, an optional join separator.
// If not specified, defaults to ""
func StringJoinSeparator(value string) StringJoinAttr {
	return func(m optionalAttr) {
		m["separator"] = value
	}
}

// Joins the strings in the given list of string tensors into one tensor,
//
// using the given separator (default is an empty separator).
//
// Arguments:
//	inputs: A list of string tensors.  The tensors must all have the same shape,
// or be scalars.  Scalars may be mixed in; these will be broadcast to the shape
// of non-scalar inputs.
func StringJoin(scope *Scope, inputs []tf.Output, optional ...StringJoinAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "StringJoin",
		Input: []tf.Input{
			tf.OutputList(inputs),
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

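// Illustrative Go usage sketch (assumes op.NewScope, op.Const, and the tf
// package alias used by this file; not part of the generated API):
//
//	s := op.NewScope()
//	hello := op.Const(s, "hello")
//	world := op.Const(s, "world")
//	joined := op.StringJoin(s, []tf.Output{hello, world},
//		op.StringJoinSeparator(" ")) // "hello world"
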
// Creates and returns an empty tensor list.
//
// All list elements must be tensors of dtype element_dtype and shape compatible
// with element_shape.
//
// handle: an empty tensor list.
// element_dtype: the type of elements in the list.
// element_shape: a shape compatible with that of elements in the list.
func EmptyTensorList(scope *Scope, element_shape tf.Output, max_num_elements tf.Output, element_dtype tf.DataType) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"element_dtype": element_dtype}
	opspec := tf.OpSpec{
		Type: "EmptyTensorList",
		Input: []tf.Input{
			element_shape, max_num_elements,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Returns a list of tensors with the same shapes and contents as the input tensors.
//
// This op can be used to override the gradient for complicated functions. For
// example, suppose y = f(x) and we wish to apply a custom function g for backprop
// such that dx = g(dy). In Python,
//
// ```python
// with tf.get_default_graph().gradient_override_map(
//     {'IdentityN': 'OverrideGradientWithG'}):
//   y, _ = identity_n([f(x), x])
//
// @tf.RegisterGradient('OverrideGradientWithG')
// def ApplyG(op, dy, _):
//   return [None, g(dy)]  # Do not backprop to f(x).
// ```
func IdentityN(scope *Scope, input []tf.Output) (output []tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "IdentityN",
		Input: []tf.Input{
			tf.OutputList(input),
		},
	}
	op := scope.AddOperation(opspec)
	if scope.Err() != nil {
		return
	}
	var idx int
	var err error
	if output, idx, err = makeOutputList(op, idx, "output"); err != nil {
		scope.UpdateErr("IdentityN", err)
		return
	}
	return output
}

// ResourceApplyCenteredRMSPropAttr is an optional argument to ResourceApplyCenteredRMSProp.
type ResourceApplyCenteredRMSPropAttr func(optionalAttr)

// ResourceApplyCenteredRMSPropUseLocking sets the optional use_locking attribute to value.
//
// value: If `True`, updating of the var, mg, ms, and mom tensors is
// protected by a lock; otherwise the behavior is undefined, but may exhibit less
// contention.
// If not specified, defaults to false
func ResourceApplyCenteredRMSPropUseLocking(value bool) ResourceApplyCenteredRMSPropAttr {
	return func(m optionalAttr) {
		m["use_locking"] = value
	}
}

// Update '*var' according to the centered RMSProp algorithm.
//
// The centered RMSProp algorithm uses an estimate of the centered second moment
// (i.e., the variance) for normalization, as opposed to regular RMSProp, which
// uses the (uncentered) second moment. This often helps with training, but is
// slightly more expensive in terms of computation and memory.
//
// Note that in the dense implementation of this algorithm, mg, ms, and mom will
// update even if the grad is zero, but in the sparse implementation, mg, ms,
// and mom will not update in iterations during which the grad is zero.
//
// mean_square = decay * mean_square + (1-decay) * gradient ** 2
// mean_grad = decay * mean_grad + (1-decay) * gradient
//
// Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2)
//
// mg <- rho * mg_{t-1} + (1-rho) * grad
// ms <- rho * ms_{t-1} + (1-rho) * grad * grad
// mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms - mg * mg + epsilon)
// var <- var - mom
//
// Arguments:
//	var_: Should be from a Variable().
//	mg: Should be from a Variable().
//	ms: Should be from a Variable().
//	mom: Should be from a Variable().
//	lr: Scaling factor. Must be a scalar.
//	rho: Decay rate. Must be a scalar.
//
//	epsilon: Ridge term. Must be a scalar.
//	grad: The gradient.
//
// Returns the created operation.
func ResourceApplyCenteredRMSProp(scope *Scope, var_ tf.Output, mg tf.Output, ms tf.Output, mom tf.Output, lr tf.Output, rho tf.Output, momentum tf.Output, epsilon tf.Output, grad tf.Output, optional ...ResourceApplyCenteredRMSPropAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ResourceApplyCenteredRMSProp",
		Input: []tf.Input{
			var_, mg, ms, mom, lr, rho, momentum, epsilon, grad,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}

// ResourceSparseApplyCenteredRMSPropAttr is an optional argument to ResourceSparseApplyCenteredRMSProp.
type ResourceSparseApplyCenteredRMSPropAttr func(optionalAttr)

// ResourceSparseApplyCenteredRMSPropUseLocking sets the optional use_locking attribute to value.
//
// value: If `True`, updating of the var, mg, ms, and mom tensors is
// protected by a lock; otherwise the behavior is undefined, but may exhibit less
// contention.
// If not specified, defaults to false
func ResourceSparseApplyCenteredRMSPropUseLocking(value bool) ResourceSparseApplyCenteredRMSPropAttr {
	return func(m optionalAttr) {
		m["use_locking"] = value
	}
}

// Update '*var' according to the centered RMSProp algorithm.
//
// The centered RMSProp algorithm uses an estimate of the centered second moment
// (i.e., the variance) for normalization, as opposed to regular RMSProp, which
// uses the (uncentered) second moment. This often helps with training, but is
// slightly more expensive in terms of computation and memory.
//
// Note that in the dense implementation of this algorithm, mg, ms, and mom will
// update even if the grad is zero, but in this sparse implementation, mg, ms,
// and mom will not update in iterations during which the grad is zero.
//
// mean_square = decay * mean_square + (1-decay) * gradient ** 2
// mean_grad = decay * mean_grad + (1-decay) * gradient
// Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2)
//
// ms <- rho * ms_{t-1} + (1-rho) * grad * grad
// mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)
// var <- var - mom
//
// Arguments:
//	var_: Should be from a Variable().
//	mg: Should be from a Variable().
//	ms: Should be from a Variable().
//	mom: Should be from a Variable().
//	lr: Scaling factor. Must be a scalar.
//	rho: Decay rate. Must be a scalar.
//
//	epsilon: Ridge term. Must be a scalar.
//	grad: The gradient.
//	indices: A vector of indices into the first dimension of var, ms and mom.
//
// Returns the created operation.
func ResourceSparseApplyCenteredRMSProp(scope *Scope, var_ tf.Output, mg tf.Output, ms tf.Output, mom tf.Output, lr tf.Output, rho tf.Output, momentum tf.Output, epsilon tf.Output, grad tf.Output, indices tf.Output, optional ...ResourceSparseApplyCenteredRMSPropAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ResourceSparseApplyCenteredRMSProp",
		Input: []tf.Input{
			var_, mg, ms, mom, lr, rho, momentum, epsilon, grad, indices,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}

// Creates a dataset that batches `batch_size` elements from `input_dataset`.
//
// Arguments:
//
//	batch_size: A scalar representing the number of elements to accumulate in a
// batch.
//
//
func BatchDataset(scope *Scope, input_dataset tf.Output, batch_size tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
	opspec := tf.OpSpec{
		Type: "BatchDataset",
		Input: []tf.Input{
			input_dataset, batch_size,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// LoadTPUEmbeddingAdadeltaParametersAttr is an optional argument to LoadTPUEmbeddingAdadeltaParameters.
type LoadTPUEmbeddingAdadeltaParametersAttr func(optionalAttr)

// LoadTPUEmbeddingAdadeltaParametersTableId sets the optional table_id attribute to value.
// If not specified, defaults to -1
//
// REQUIRES: value >= -1
func LoadTPUEmbeddingAdadeltaParametersTableId(value int64) LoadTPUEmbeddingAdadeltaParametersAttr {
	return func(m optionalAttr) {
		m["table_id"] = value
	}
}

// LoadTPUEmbeddingAdadeltaParametersTableName sets the optional table_name attribute to value.
// If not specified, defaults to ""
func LoadTPUEmbeddingAdadeltaParametersTableName(value string) LoadTPUEmbeddingAdadeltaParametersAttr {
	return func(m optionalAttr) {
		m["table_name"] = value
	}
}

// Load Adadelta embedding parameters.
//
// An op that loads optimization parameters into HBM for embedding. Must be
// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
// embedding table configuration. For example, this op is used to install
// parameters that are loaded from a checkpoint before a training loop is
// executed.
//
// Arguments:
//	parameters: Value of parameters used in the Adadelta optimization algorithm.
//	accumulators: Value of accumulators used in the Adadelta optimization algorithm.
//	updates: Value of updates used in the Adadelta optimization algorithm.
//
//
//
// Returns the created operation.
func LoadTPUEmbeddingAdadeltaParameters(scope *Scope, parameters tf.Output, accumulators tf.Output, updates tf.Output, num_shards int64, shard_id int64, optional ...LoadTPUEmbeddingAdadeltaParametersAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "LoadTPUEmbeddingAdadeltaParameters",
		Input: []tf.Input{
			parameters, accumulators, updates,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}

// Converts each string in the input Tensor to its hash mod by a number of buckets.
//
// The hash function is deterministic on the content of the string within the
// process and will never change. However, it is not suitable for cryptography.
// This function may be used when CPU time is scarce and inputs are trusted or
// unimportant. There is a risk of adversaries constructing inputs that all hash
// to the same bucket. To prevent this problem, use a strong hash function with
// `tf.string_to_hash_bucket_strong`.
//
// Arguments:
//	input: The strings to assign a hash bucket.
//	num_buckets: The number of buckets.
//
// Returns A Tensor of the same shape as the input `string_tensor`.
func StringToHashBucketFast(scope *Scope, input tf.Output, num_buckets int64) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_buckets": num_buckets}
	opspec := tf.OpSpec{
		Type: "StringToHashBucketFast",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

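// Illustrative Go usage sketch (assumes op.NewScope and op.Const; not part of
// the generated API):
//
//	s := op.NewScope()
//	words := op.Const(s, []string{"lorem", "ipsum"})
//	buckets := op.StringToHashBucketFast(s, words, 1000) // int64 ids in [0, 1000)
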
// RealAttr is an optional argument to Real.
type RealAttr func(optionalAttr)

// RealTout sets the optional Tout attribute to value.
// If not specified, defaults to DT_FLOAT
func RealTout(value tf.DataType) RealAttr {
	return func(m optionalAttr) {
		m["Tout"] = value
	}
}

// Returns the real part of a complex number.
//
// Given a tensor `input` of complex numbers, this operation returns a tensor of
// type `float` that is the real part of each element in `input`. All elements in
// `input` must be complex numbers of the form \\(a + bj\\), where *a* is the real
// part returned by this operation and *b* is the imaginary part.
//
// For example:
//
// ```
// # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
// tf.real(input) ==> [-2.25, 3.25]
// ```
func Real(scope *Scope, input tf.Output, optional ...RealAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "Real",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// AudioSummaryAttr is an optional argument to AudioSummary.
type AudioSummaryAttr func(optionalAttr)

// AudioSummaryMaxOutputs sets the optional max_outputs attribute to value.
//
// value: Max number of batch elements to generate audio for.
// If not specified, defaults to 3
//
// REQUIRES: value >= 1
func AudioSummaryMaxOutputs(value int64) AudioSummaryAttr {
	return func(m optionalAttr) {
		m["max_outputs"] = value
	}
}

// Outputs a `Summary` protocol buffer with audio.
//
// DEPRECATED at GraphDef version 15: Use AudioSummaryV2.
//
// The summary has up to `max_outputs` summary values containing audio. The
// audio is built from `tensor` which must be 3-D with shape `[batch_size,
// frames, channels]` or 2-D with shape `[batch_size, frames]`. The values are
// assumed to be in the range of `[-1.0, 1.0]` with a sample rate of `sample_rate`.
//
// The `tag` argument is a scalar `Tensor` of type `string`.  It is used to
// build the `tag` of the summary values:
//
// *  If `max_outputs` is 1, the summary value tag is '*tag*/audio'.
// *  If `max_outputs` is greater than 1, the summary value tags are
//    generated sequentially as '*tag*/audio/0', '*tag*/audio/1', etc.
//
// Arguments:
//	tag: Scalar. Used to build the `tag` attribute of the summary values.
//	tensor: 2-D of shape `[batch_size, frames]`.
//	sample_rate: The sample rate of the signal in hertz.
//
// Returns Scalar. Serialized `Summary` protocol buffer.
func AudioSummary(scope *Scope, tag tf.Output, tensor tf.Output, sample_rate float32, optional ...AudioSummaryAttr) (summary tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"sample_rate": sample_rate}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "AudioSummary",
		Input: []tf.Input{
			tag, tensor,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// QrAttr is an optional argument to Qr.
type QrAttr func(optionalAttr)

// QrFullMatrices sets the optional full_matrices attribute to value.
//
// value: If true, compute full-sized `q` and `r`. If false
// (the default), compute only the leading `P` columns of `q`.
// If not specified, defaults to false
func QrFullMatrices(value bool) QrAttr {
	return func(m optionalAttr) {
		m["full_matrices"] = value
	}
}

// Computes the QR decompositions of one or more matrices.
//
// Computes the QR decomposition of each inner matrix in `tensor` such that
// `tensor[..., :, :] = q[..., :, :] * r[..., :, :]`
//
// ```python
// # a is a tensor.
// # q is a tensor of orthonormal matrices.
// # r is a tensor of upper triangular matrices.
// q, r = qr(a)
// q_full, r_full = qr(a, full_matrices=True)
// ```
//
// Arguments:
//	input: A tensor of shape `[..., M, N]` whose inner-most 2 dimensions
// form matrices of size `[M, N]`. Let `P` be the minimum of `M` and `N`.
//
// Returns Orthonormal basis for the range of `a` (shape `[..., M, P]` if
// `full_matrices` is `False`, `[..., M, M]` if `True`), and the triangular
// factor (shape `[..., P, N]` if `full_matrices` is `False`, `[..., M, N]`
// if `True`).
func Qr(scope *Scope, input tf.Output, optional ...QrAttr) (q tf.Output, r tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "Qr",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1)
}

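// Illustrative Go usage sketch (assumes op.NewScope and op.Const; not part of
// the generated API):
//
//	s := op.NewScope()
//	a := op.Const(s, [][]float32{{1, 2}, {3, 4}, {5, 6}}) // M=3, N=2, P=2
//	q, r := op.Qr(s, a)                                   // q: [3, 2], r: [2, 2]
//	qf, rf := op.Qr(s, a, op.QrFullMatrices(true))        // qf: [3, 3], rf: [3, 2]
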
// TensorArrayV3Attr is an optional argument to TensorArrayV3.
type TensorArrayV3Attr func(optionalAttr)

// TensorArrayV3ElementShape sets the optional element_shape attribute to value.
//
// value: The expected shape of an element, if known. Used to
// validate the shapes of TensorArray elements. If this shape is not
// fully specified, gathering zero-size TensorArrays is an error.
// If not specified, defaults to <unknown_rank:true >
func TensorArrayV3ElementShape(value tf.Shape) TensorArrayV3Attr {
	return func(m optionalAttr) {
		m["element_shape"] = value
	}
}

// TensorArrayV3DynamicSize sets the optional dynamic_size attribute to value.
//
// value: A boolean that determines whether writes to the TensorArray
// are allowed to grow the size.  By default, this is not allowed.
// If not specified, defaults to false
func TensorArrayV3DynamicSize(value bool) TensorArrayV3Attr {
	return func(m optionalAttr) {
		m["dynamic_size"] = value
	}
}

// TensorArrayV3ClearAfterRead sets the optional clear_after_read attribute to value.
//
// value: If true (default), Tensors in the TensorArray are cleared
// after being read.  This disables multiple read semantics but allows early
// release of memory.
// If not specified, defaults to true
func TensorArrayV3ClearAfterRead(value bool) TensorArrayV3Attr {
	return func(m optionalAttr) {
		m["clear_after_read"] = value
	}
}

// TensorArrayV3IdenticalElementShapes sets the optional identical_element_shapes attribute to value.
//
// value: If true (default is false), then all
// elements in the TensorArray will be expected to have identical shapes.
// This allows certain behaviors, like dynamically checking for
// consistent shapes on write, and being able to fill in properly
// shaped zero tensors on stack -- even if the element_shape attribute
// is not fully defined.
// If not specified, defaults to false
func TensorArrayV3IdenticalElementShapes(value bool) TensorArrayV3Attr {
	return func(m optionalAttr) {
		m["identical_element_shapes"] = value
	}
}

// TensorArrayV3TensorArrayName sets the optional tensor_array_name attribute to value.
//
// value: Overrides the name used for the temporary tensor_array
// resource. Default value is the name of the 'TensorArray' op (which
// is guaranteed unique).
// If not specified, defaults to ""
func TensorArrayV3TensorArrayName(value string) TensorArrayV3Attr {
	return func(m optionalAttr) {
		m["tensor_array_name"] = value
	}
}

// An array of Tensors of given size.
//
// Write data via Write and read via Read or Pack.
//
// Arguments:
//	size: The size of the array.
//	dtype: The type of the elements on the tensor_array.
//
// Returns The handle to the TensorArray, and a scalar used to control gradient flow.
func TensorArrayV3(scope *Scope, size tf.Output, dtype tf.DataType, optional ...TensorArrayV3Attr) (handle tf.Output, flow tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"dtype": dtype}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "TensorArrayV3",
		Input: []tf.Input{
			size,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1)
}

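// Illustrative Go usage sketch (assumes op.NewScope, op.Const, and tf.Float;
// not part of the generated API):
//
//	s := op.NewScope()
//	size := op.Const(s, int32(8))
//	handle, flow := op.TensorArrayV3(s, size, tf.Float,
//		op.TensorArrayV3DynamicSize(true))
//
// handle is passed to the other TensorArray ops; flow chains them in order.
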
// Returns the truth value of NOT x element-wise.
func LogicalNot(scope *Scope, x tf.Output) (y tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "LogicalNot",
		Input: []tf.Input{
			x,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// 3D real-valued fast Fourier transform.
//
// Computes the 3-dimensional discrete Fourier transform of a real-valued signal
// over the inner-most 3 dimensions of `input`.
//
// Since the DFT of a real signal is Hermitian-symmetric, `RFFT3D` only returns the
// `fft_length / 2 + 1` unique components of the FFT for the inner-most dimension
// of `output`: the zero-frequency term, followed by the `fft_length / 2`
// positive-frequency terms.
//
// Along each axis `RFFT3D` is computed on, if `fft_length` is smaller than the
// corresponding dimension of `input`, the dimension is cropped. If it is larger,
// the dimension is padded with zeros.
//
// Arguments:
//	input: A float32 tensor.
//	fft_length: An int32 tensor of shape [3]. The FFT length for each dimension.
//
// Returns A complex64 tensor of the same rank as `input`. The inner-most 3
//   dimensions of `input` are replaced with their 3D Fourier transform. The
//   inner-most dimension contains `fft_length / 2 + 1` unique frequency
//   components.
//
// @compatibility(numpy)
// Equivalent to np.fft.rfftn with 3 dimensions.
// @end_compatibility
func RFFT3D(scope *Scope, input tf.Output, fft_length tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "RFFT3D",
		Input: []tf.Input{
			input, fft_length,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Computes rectified linear: `max(features, 0)`.
func Relu(scope *Scope, features tf.Output) (activations tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Relu",
		Input: []tf.Input{
			features,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// ResourceApplyAddSignAttr is an optional argument to ResourceApplyAddSign.
type ResourceApplyAddSignAttr func(optionalAttr)

// ResourceApplyAddSignUseLocking sets the optional use_locking attribute to value.
//
// value: If `True`, updating of the var and m tensors is
// protected by a lock; otherwise the behavior is undefined, but may exhibit less
// contention.
// If not specified, defaults to false
func ResourceApplyAddSignUseLocking(value bool) ResourceApplyAddSignAttr {
	return func(m optionalAttr) {
		m["use_locking"] = value
	}
}

// Update '*var' according to the AddSign update.
//
// m_t <- beta1 * m_{t-1} + (1 - beta1) * g
// update <- (alpha + sign_decay * sign(g) * sign(m)) * g
// variable <- variable - lr_t * update
//
// Arguments:
//	var_: Should be from a Variable().
//	m: Should be from a Variable().
//	lr: Scaling factor. Must be a scalar.
//	alpha: Must be a scalar.
//	sign_decay: Must be a scalar.
//	beta: Must be a scalar.
//	grad: The gradient.
//
// Returns the created operation.
func ResourceApplyAddSign(scope *Scope, var_ tf.Output, m tf.Output, lr tf.Output, alpha tf.Output, sign_decay tf.Output, beta tf.Output, grad tf.Output, optional ...ResourceApplyAddSignAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ResourceApplyAddSign",
		Input: []tf.Input{
			var_, m, lr, alpha, sign_decay, beta, grad,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}

// Divides sparse updates into the variable referenced by `resource`.
//
// This operation computes
//
//     # Scalar indices
//     ref[indices, ...] /= updates[...]
//
//     # Vector indices (for each i)
//     ref[indices[i], ...] /= updates[i, ...]
//
//     # High rank indices (for each i, ..., j)
//     ref[indices[i, ..., j], ...] /= updates[i, ..., j, ...]
//
// Duplicate entries are handled correctly: if multiple `indices` reference
// the same location, their contributions multiply.
//
// Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.
//
// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
// <img style="width:100%" src='https://www.tensorflow.org/images/ScatterAdd.png' alt>
// </div>
//
// Arguments:
//	resource: Should be from a `Variable` node.
//	indices: A tensor of indices into the first dimension of `ref`.
//	updates: A tensor of values that `ref` is divided by.
//
// Returns the created operation.
func ResourceScatterDiv(scope *Scope, resource tf.Output, indices tf.Output, updates tf.Output) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "ResourceScatterDiv",
		Input: []tf.Input{
			resource, indices, updates,
		},
	}
	return scope.AddOperation(opspec)
}

// ListDiffAttr is an optional argument to ListDiff.
type ListDiffAttr func(optionalAttr)

// ListDiffOutIdx sets the optional out_idx attribute to value.
// If not specified, defaults to DT_INT32
func ListDiffOutIdx(value tf.DataType) ListDiffAttr {
	return func(m optionalAttr) {
		m["out_idx"] = value
	}
}

// Computes the difference between two lists of numbers or strings.
//
// Given a list `x` and a list `y`, this operation returns a list `out` that
// represents all values that are in `x` but not in `y`. The returned list `out`
// is sorted in the same order that the numbers appear in `x` (duplicates are
// preserved). This operation also returns a list `idx` that represents the
// position of each `out` element in `x`. In other words:
//
// `out[i] = x[idx[i]] for i in [0, 1, ..., len(out) - 1]`
//
// For example, given this input:
//
// ```
// x = [1, 2, 3, 4, 5, 6]
// y = [1, 3, 5]
// ```
//
// This operation would return:
//
// ```
// out ==> [2, 4, 6]
// idx ==> [1, 3, 5]
// ```
//
// Arguments:
//	x: 1-D. Values to keep.
//	y: 1-D. Values to remove.
//
// Returns 1-D values present in `x` but not in `y`, and 1-D positions of `x`
// values preserved in `out`.
func ListDiff(scope *Scope, x tf.Output, y tf.Output, optional ...ListDiffAttr) (out tf.Output, idx tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ListDiff",
		Input: []tf.Input{
			x, y,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1)
}

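// Illustrative Go usage sketch of the example above (assumes op.NewScope and
// op.Const; not part of the generated API):
//
//	s := op.NewScope()
//	x := op.Const(s, []int32{1, 2, 3, 4, 5, 6})
//	y := op.Const(s, []int32{1, 3, 5})
//	out, idx := op.ListDiff(s, x, y) // out = [2 4 6], idx = [1 3 5]
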
// LoadTPUEmbeddingAdadeltaParametersGradAccumDebugAttr is an optional argument to LoadTPUEmbeddingAdadeltaParametersGradAccumDebug.
type LoadTPUEmbeddingAdadeltaParametersGradAccumDebugAttr func(optionalAttr)

// LoadTPUEmbeddingAdadeltaParametersGradAccumDebugTableId sets the optional table_id attribute to value.
// If not specified, defaults to -1
//
// REQUIRES: value >= -1
func LoadTPUEmbeddingAdadeltaParametersGradAccumDebugTableId(value int64) LoadTPUEmbeddingAdadeltaParametersGradAccumDebugAttr {
	return func(m optionalAttr) {
		m["table_id"] = value
	}
}

// LoadTPUEmbeddingAdadeltaParametersGradAccumDebugTableName sets the optional table_name attribute to value.
// If not specified, defaults to ""
func LoadTPUEmbeddingAdadeltaParametersGradAccumDebugTableName(value string) LoadTPUEmbeddingAdadeltaParametersGradAccumDebugAttr {
	return func(m optionalAttr) {
		m["table_name"] = value
	}
}

// Load Adadelta parameters with debug support.
//
// An op that loads optimization parameters into HBM for embedding. Must be
// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
// embedding table configuration. For example, this op is used to install
// parameters that are loaded from a checkpoint before a training loop is
// executed.
//
// Arguments:
//	parameters: Value of parameters used in the Adadelta optimization algorithm.
//	accumulators: Value of accumulators used in the Adadelta optimization algorithm.
//	updates: Value of updates used in the Adadelta optimization algorithm.
//	gradient_accumulators: Value of gradient_accumulators used in the Adadelta optimization algorithm.
//
//
//
// Returns the created operation.
func LoadTPUEmbeddingAdadeltaParametersGradAccumDebug(scope *Scope, parameters tf.Output, accumulators tf.Output, updates tf.Output, gradient_accumulators tf.Output, num_shards int64, shard_id int64, optional ...LoadTPUEmbeddingAdadeltaParametersGradAccumDebugAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "LoadTPUEmbeddingAdadeltaParametersGradAccumDebug",
		Input: []tf.Input{
			parameters, accumulators, updates, gradient_accumulators,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}

// Returns a serialized GraphDef representing `input_dataset`.
//
// Returns a graph representation for `input_dataset`.
//
// Arguments:
//	input_dataset: A variant tensor representing the dataset to return the graph representation for.
//
// Returns The graph representation of the dataset (as serialized GraphDef).
func DatasetToGraph(scope *Scope, input_dataset tf.Output) (graph tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "DatasetToGraph",
		Input: []tf.Input{
			input_dataset,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// MatrixSolveAttr is an optional argument to MatrixSolve.
type MatrixSolveAttr func(optionalAttr)

// MatrixSolveAdjoint sets the optional adjoint attribute to value.
//
// value: Boolean indicating whether to solve with `matrix` or its (block-wise)
// adjoint.
// If not specified, defaults to false
func MatrixSolveAdjoint(value bool) MatrixSolveAttr {
	return func(m optionalAttr) {
		m["adjoint"] = value
	}
}

// Solves systems of linear equations.
//
// `Matrix` is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
// form square matrices. `Rhs` is a tensor of shape `[..., M, K]`. The `output` is
// a tensor of shape `[..., M, K]`.  If `adjoint` is `False` then each output matrix
// satisfies `matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]`.
// If `adjoint` is `True` then each output matrix satisfies
// `adjoint(matrix[..., :, :]) * output[..., :, :] = rhs[..., :, :]`.
//
// Arguments:
//	matrix: Shape is `[..., M, M]`.
//	rhs: Shape is `[..., M, K]`.
//
// Returns Shape is `[..., M, K]`.
func MatrixSolve(scope *Scope, matrix tf.Output, rhs tf.Output, optional ...MatrixSolveAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "MatrixSolve",
		Input: []tf.Input{
			matrix, rhs,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

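// Illustrative Go usage sketch (assumes op.NewScope and op.Const; not part of
// the generated API):
//
//	s := op.NewScope()
//	m := op.Const(s, [][]float32{{3, 1}, {1, 2}})
//	rhs := op.Const(s, [][]float32{{9}, {8}})
//	x := op.MatrixSolve(s, m, rhs) // solves m * x = rhs, so x = [[2], [3]]
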
// ResourceApplyKerasMomentumAttr is an optional argument to ResourceApplyKerasMomentum.
type ResourceApplyKerasMomentumAttr func(optionalAttr)

// ResourceApplyKerasMomentumUseLocking sets the optional use_locking attribute to value.
//
// value: If `True`, updating of the var and accum tensors will be protected
// by a lock; otherwise the behavior is undefined, but may exhibit less
// contention.
// If not specified, defaults to false
func ResourceApplyKerasMomentumUseLocking(value bool) ResourceApplyKerasMomentumAttr {
	return func(m optionalAttr) {
		m["use_locking"] = value
	}
}

// ResourceApplyKerasMomentumUseNesterov sets the optional use_nesterov attribute to value.
//
// value: If `True`, the tensor passed to compute grad will be
// var + momentum * accum, so in the end, the var you get is actually
// var + momentum * accum.
// If not specified, defaults to false
func ResourceApplyKerasMomentumUseNesterov(value bool) ResourceApplyKerasMomentumAttr {
	return func(m optionalAttr) {
		m["use_nesterov"] = value
	}
}

// Update '*var' according to the momentum scheme.
//
// Set use_nesterov = True if you want to use Nesterov momentum.
//
// accum = accum * momentum - lr * grad
// var += accum
//
// Arguments:
//	var_: Should be from a Variable().
//	accum: Should be from a Variable().
//	lr: Scaling factor. Must be a scalar.
//	grad: The gradient.
//	momentum: Momentum. Must be a scalar.
//
// Returns the created operation.
func ResourceApplyKerasMomentum(scope *Scope, var_ tf.Output, accum tf.Output, lr tf.Output, grad tf.Output, momentum tf.Output, optional ...ResourceApplyKerasMomentumAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ResourceApplyKerasMomentum",
		Input: []tf.Input{
			var_, accum, lr, grad, momentum,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}

// Return a tensor with the same shape and contents as the input tensor or value.
func Identity(scope *Scope, input tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Identity",
		Input: []tf.Input{
			input,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Computes arctangent of `y/x` element-wise, respecting signs of the arguments.
//
// This is the angle \( \theta \in [-\pi, \pi] \) such that
// \[ x = r \cos(\theta) \]
// and
// \[ y = r \sin(\theta) \]
// where \( r = \sqrt{x^2 + y^2} \).
func Atan2(scope *Scope, y tf.Output, x tf.Output) (z tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Atan2",
		Input: []tf.Input{
			y, x,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

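// A small usage sketch for Atan2; the four (x, y) pairs below land in the
// four quadrants, so the result is quadrant-aware in a way plain atan(y/x)
// is not. Values are illustrative.
//
// ```
// root := op.NewScope()
// y := op.Const(root.SubScope("y"), []float32{1, 1, -1, -1})
// x := op.Const(root.SubScope("x"), []float32{1, -1, 1, -1})
// theta := op.Atan2(root, y, x)
// // theta evaluates to approximately [pi/4, 3pi/4, -pi/4, -3pi/4].
// ```
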
// Updates specified rows with values in `v`.
//
// Computes `x[i, :] = v; return x`.
//
// Arguments:
//	x: A tensor of type `T`.
//	i: A vector. Indices into the left-most dimension of `x`.
//	v: A `Tensor` of type T. Same dimension sizes as x except the first dimension, which must be the same as i's size.
//
// Returns A `Tensor` of type T. An alias of `x`. The content of `y` is undefined if there are duplicates in `i`.
func InplaceUpdate(scope *Scope, x tf.Output, i tf.Output, v tf.Output) (y tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "InplaceUpdate",
		Input: []tf.Input{
			x, i, v,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

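// A sketch of InplaceUpdate replacing rows 0 and 2 of a 3x2 tensor.
// Values are illustrative.
//
// ```
// root := op.NewScope()
// x := op.Const(root.SubScope("x"), [][]float32{{1, 1}, {2, 2}, {3, 3}})
// i := op.Const(root.SubScope("i"), []int32{0, 2})
// v := op.Const(root.SubScope("v"), [][]float32{{9, 9}, {8, 8}})
// y := op.InplaceUpdate(root, x, i, v)
// // y evaluates to [[9, 9], [2, 2], [8, 8]].
// ```
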
// OutfeedDequeueTupleAttr is an optional argument to OutfeedDequeueTuple.
type OutfeedDequeueTupleAttr func(optionalAttr)

// OutfeedDequeueTupleDeviceOrdinal sets the optional device_ordinal attribute to value.
//
// value: The TPU device to use. This should be -1 when the Op
// is running on a TPU device, and >= 0 when the Op is running on the CPU
// device.
// If not specified, defaults to -1
func OutfeedDequeueTupleDeviceOrdinal(value int64) OutfeedDequeueTupleAttr {
	return func(m optionalAttr) {
		m["device_ordinal"] = value
	}
}

// Retrieve multiple values from the computation outfeed.
//
// This operation will block indefinitely until data is available. Output `i`
// corresponds to XLA tuple element `i`.
//
// Arguments:
//	dtypes: The element types of each element in `outputs`.
//	shapes: The shapes of each tensor in `outputs`.
//
// Returns A list of tensors that will be read from the outfeed.
func OutfeedDequeueTuple(scope *Scope, dtypes []tf.DataType, shapes []tf.Shape, optional ...OutfeedDequeueTupleAttr) (outputs []tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"dtypes": dtypes, "shapes": shapes}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "OutfeedDequeueTuple",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	if scope.Err() != nil {
		return
	}
	var idx int
	var err error
	if outputs, idx, err = makeOutputList(op, idx, "outputs"); err != nil {
		scope.UpdateErr("OutfeedDequeueTuple", err)
		return
	}
	return outputs
}

// Identity op for gradient debugging.
//
// This op is hidden from public in Python. It is used by TensorFlow Debugger to
// register gradient tensors for gradient debugging.
// This op operates on non-reference-type tensors.
func DebugGradientIdentity(scope *Scope, input tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "DebugGradientIdentity",
		Input: []tf.Input{
			input,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// ResourceSparseApplyAdadeltaAttr is an optional argument to ResourceSparseApplyAdadelta.
type ResourceSparseApplyAdadeltaAttr func(optionalAttr)

// ResourceSparseApplyAdadeltaUseLocking sets the optional use_locking attribute to value.
//
// value: If True, updating of the var and accum tensors will be protected by
// a lock; otherwise the behavior is undefined, but may exhibit less contention.
// If not specified, defaults to false
func ResourceSparseApplyAdadeltaUseLocking(value bool) ResourceSparseApplyAdadeltaAttr {
	return func(m optionalAttr) {
		m["use_locking"] = value
	}
}

// Update relevant entries in '*var' and '*accum' according to the adadelta scheme.
//
// Arguments:
//	var_: Should be from a Variable().
//	accum: Should be from a Variable().
//	accum_update: Should be from a Variable().
//	lr: Learning rate. Must be a scalar.
//	rho: Decay factor. Must be a scalar.
//	epsilon: Constant factor. Must be a scalar.
//	grad: The gradient.
//	indices: A vector of indices into the first dimension of var and accum.
//
// Returns the created operation.
func ResourceSparseApplyAdadelta(scope *Scope, var_ tf.Output, accum tf.Output, accum_update tf.Output, lr tf.Output, rho tf.Output, epsilon tf.Output, grad tf.Output, indices tf.Output, optional ...ResourceSparseApplyAdadeltaAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ResourceSparseApplyAdadelta",
		Input: []tf.Input{
			var_, accum, accum_update, lr, rho, epsilon, grad, indices,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}

// Returns which elements of x are NaN.
//
// @compatibility(numpy)
// Equivalent to np.isnan
// @end_compatibility
func IsNan(scope *Scope, x tf.Output) (y tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "IsNan",
		Input: []tf.Input{
			x,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

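// A usage sketch for IsNan (assumes `import "math"` in the caller):
//
// ```
// root := op.NewScope()
// x := op.Const(root.SubScope("x"), []float32{1, float32(math.NaN()), 3})
// y := op.IsNan(root, x)
// // y evaluates to [false, true, false].
// ```
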
// DepthwiseConv2dNativeBackpropFilterAttr is an optional argument to DepthwiseConv2dNativeBackpropFilter.
type DepthwiseConv2dNativeBackpropFilterAttr func(optionalAttr)

// DepthwiseConv2dNativeBackpropFilterDataFormat sets the optional data_format attribute to value.
//
// value: Specify the data format of the input and output data. With the
// default format "NHWC", the data is stored in the order of:
//     [batch, height, width, channels].
// Alternatively, the format could be "NCHW", the data storage order of:
//     [batch, channels, height, width].
// If not specified, defaults to "NHWC"
func DepthwiseConv2dNativeBackpropFilterDataFormat(value string) DepthwiseConv2dNativeBackpropFilterAttr {
	return func(m optionalAttr) {
		m["data_format"] = value
	}
}

// DepthwiseConv2dNativeBackpropFilterDilations sets the optional dilations attribute to value.
//
// value: 1-D tensor of length 4.  The dilation factor for each dimension of
// `input`. If set to k > 1, there will be k-1 skipped cells between each filter
// element on that dimension. The dimension order is determined by the value of
// `data_format`, see above for details. Dilations in the batch and depth
// dimensions must be 1.
// If not specified, defaults to [1, 1, 1, 1]
func DepthwiseConv2dNativeBackpropFilterDilations(value []int64) DepthwiseConv2dNativeBackpropFilterAttr {
	return func(m optionalAttr) {
		m["dilations"] = value
	}
}

// Computes the gradients of depthwise convolution with respect to the filter.
//
// Arguments:
//	input: 4-D with shape based on `data_format`.  For example, if
// `data_format` is 'NHWC' then `input` is a 4-D `[batch, in_height,
// in_width, in_channels]` tensor.
//	filter_sizes: An integer vector representing the tensor shape of `filter`,
// where `filter` is a 4-D
// `[filter_height, filter_width, in_channels, depthwise_multiplier]` tensor.
//	out_backprop: 4-D with shape based on `data_format`.
// For example, if `data_format` is 'NHWC' then
// out_backprop shape is `[batch, out_height, out_width, out_channels]`.
// Gradients w.r.t. the output of the convolution.
//	strides: The stride of the sliding window for each dimension of the input
// of the convolution.
//	padding: The type of padding algorithm to use.
//
// Returns 4-D with shape
// `[filter_height, filter_width, in_channels, out_channels]`.  Gradient w.r.t.
// the `filter` input of the convolution.
func DepthwiseConv2dNativeBackpropFilter(scope *Scope, input tf.Output, filter_sizes tf.Output, out_backprop tf.Output, strides []int64, padding string, optional ...DepthwiseConv2dNativeBackpropFilterAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"strides": strides, "padding": padding}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "DepthwiseConv2dNativeBackpropFilter",
		Input: []tf.Input{
			input, filter_sizes, out_backprop,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// MapUnstageAttr is an optional argument to MapUnstage.
type MapUnstageAttr func(optionalAttr)

// MapUnstageCapacity sets the optional capacity attribute to value.
// If not specified, defaults to 0
//
// REQUIRES: value >= 0
func MapUnstageCapacity(value int64) MapUnstageAttr {
	return func(m optionalAttr) {
		m["capacity"] = value
	}
}

// MapUnstageMemoryLimit sets the optional memory_limit attribute to value.
// If not specified, defaults to 0
//
// REQUIRES: value >= 0
func MapUnstageMemoryLimit(value int64) MapUnstageAttr {
	return func(m optionalAttr) {
		m["memory_limit"] = value
	}
}

// MapUnstageContainer sets the optional container attribute to value.
// If not specified, defaults to ""
func MapUnstageContainer(value string) MapUnstageAttr {
	return func(m optionalAttr) {
		m["container"] = value
	}
}

// MapUnstageSharedName sets the optional shared_name attribute to value.
// If not specified, defaults to ""
func MapUnstageSharedName(value string) MapUnstageAttr {
	return func(m optionalAttr) {
		m["shared_name"] = value
	}
}

// Op removes and returns the values associated with the key from the
// underlying container. If the underlying container does not contain
// this key, the op will block until it does.
func MapUnstage(scope *Scope, key tf.Output, indices tf.Output, dtypes []tf.DataType, optional ...MapUnstageAttr) (values []tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"dtypes": dtypes}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "MapUnstage",
		Input: []tf.Input{
			key, indices,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	if scope.Err() != nil {
		return
	}
	var idx int
	var err error
	if values, idx, err = makeOutputList(op, idx, "values"); err != nil {
		scope.UpdateErr("MapUnstage", err)
		return
	}
	return values
}

// An op enabling differentiation of TPU Embeddings.
//
// This op simply returns its first input, which is assumed to have been sliced
// from the Tensors returned by TPUEmbeddingDequeueActivations. The presence of
// this op, and its first argument being a trainable Variable, enables automatic
// differentiation of graphs containing embeddings via the TPU Embedding Python
// libraries.
//
// Arguments:
//	embedding_variable: A trainable variable, enabling optimizers to find this op.
//	sliced_activations: The embedding activations Tensor to return.
//	table_id: The id of the table in the embedding layer configuration from which
// these activations were computed.
//	lookup_id: Identifier of the set of embedding indices which produced these
// activations.
func TPUEmbeddingActivations(scope *Scope, embedding_variable tf.Output, sliced_activations tf.Output, table_id int64, lookup_id int64) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"table_id": table_id, "lookup_id": lookup_id}
	opspec := tf.OpSpec{
		Type: "TPUEmbeddingActivations",
		Input: []tf.Input{
			embedding_variable, sliced_activations,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// BatchToSpace for 4-D tensors of type T.
//
// This is a legacy version of the more general BatchToSpaceND.
//
// Rearranges (permutes) data from batch into blocks of spatial data, followed by
// cropping. This is the reverse transformation of SpaceToBatch. More specifically,
// this op outputs a copy of the input tensor where values from the `batch`
// dimension are moved in spatial blocks to the `height` and `width` dimensions,
// followed by cropping along the `height` and `width` dimensions.
//
// Arguments:
//	input: 4-D tensor with shape
// `[batch*block_size*block_size, height_pad/block_size, width_pad/block_size,
//   depth]`. Note that the batch size of the input tensor must be divisible by
// `block_size * block_size`.
//	crops: 2-D tensor of non-negative integers with shape `[2, 2]`. It specifies
// how many elements to crop from the intermediate result across the spatial
// dimensions as follows:
//
//     crops = [[crop_top, crop_bottom], [crop_left, crop_right]]
//
//
// Returns 4-D with shape `[batch, height, width, depth]`, where:
//
//       height = height_pad - crop_top - crop_bottom
//       width = width_pad - crop_left - crop_right
//
// The attr `block_size` must be greater than one. It indicates the block size.
//
// Some examples:
//
// (1) For the following input of shape `[4, 1, 1, 1]` and block_size of 2:
//
// ```
// [[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
// ```
//
// The output tensor has shape `[1, 2, 2, 1]` and value:
//
// ```
// x = [[[[1], [2]], [[3], [4]]]]
// ```
//
// (2) For the following input of shape `[4, 1, 1, 3]` and block_size of 2:
//
// ```
// [[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]]
// ```
//
// The output tensor has shape `[1, 2, 2, 3]` and value:
//
// ```
// x = [[[[1, 2, 3], [4, 5, 6]],
//       [[7, 8, 9], [10, 11, 12]]]]
// ```
//
// (3) For the following input of shape `[4, 2, 2, 1]` and block_size of 2:
//
// ```
// x = [[[[1], [3]], [[9], [11]]],
//      [[[2], [4]], [[10], [12]]],
//      [[[5], [7]], [[13], [15]]],
//      [[[6], [8]], [[14], [16]]]]
// ```
//
// The output tensor has shape `[1, 4, 4, 1]` and value:
//
// ```
// x = [[[[1],  [2],  [3],  [4]],
//       [[5],  [6],  [7],  [8]],
//       [[9],  [10], [11], [12]],
//       [[13], [14], [15], [16]]]]
// ```
//
// (4) For the following input of shape `[8, 1, 2, 1]` and block_size of 2:
//
// ```
// x = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]],
//      [[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]]
// ```
//
// The output tensor has shape `[2, 2, 4, 1]` and value:
//
// ```
// x = [[[[1],  [2],  [3],  [4]],
//       [[5],  [6],  [7],  [8]]],
//      [[[9],  [10], [11], [12]],
//       [[13], [14], [15], [16]]]]
// ```
func BatchToSpace(scope *Scope, input tf.Output, crops tf.Output, block_size int64) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"block_size": block_size}
	opspec := tf.OpSpec{
		Type: "BatchToSpace",
		Input: []tf.Input{
			input, crops,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Produces a summary of any statistics recorded by the given statistics manager.
func ExperimentalStatsAggregatorSummary(scope *Scope, iterator tf.Output) (summary tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "ExperimentalStatsAggregatorSummary",
		Input: []tf.Input{
			iterator,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Makes a new iterator from the given `dataset` and stores it in `iterator`.
//
// This operation may be executed multiple times. Each execution will reset the
// iterator in `iterator` to the first element of `dataset`.
//
// Returns the created operation.
func MakeIterator(scope *Scope, dataset tf.Output, iterator tf.Output) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "MakeIterator",
		Input: []tf.Input{
			dataset, iterator,
		},
	}
	return scope.AddOperation(opspec)
}

// Component-wise divides a SparseTensor by a dense Tensor.
//
// *Limitation*: this Op only broadcasts the dense side to the sparse side, but not
// the other direction.
//
// Arguments:
//	sp_indices: 2-D.  `N x R` matrix with the indices of non-empty values in a
// SparseTensor, possibly not in canonical ordering.
//	sp_values: 1-D.  `N` non-empty values corresponding to `sp_indices`.
//	sp_shape: 1-D.  Shape of the input SparseTensor.
//	dense: `R`-D.  The dense Tensor operand.
//
// Returns 1-D.  The `N` values that are operated on.
func SparseDenseCwiseDiv(scope *Scope, sp_indices tf.Output, sp_values tf.Output, sp_shape tf.Output, dense tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "SparseDenseCwiseDiv",
		Input: []tf.Input{
			sp_indices, sp_values, sp_shape, dense,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Creates a dataset that batches and pads `batch_size` elements from the input.
//
// Arguments:
//
//	batch_size: A scalar representing the number of elements to accumulate in a
// batch.
//	padded_shapes: A list of int64 tensors representing the desired padded shapes
// of the corresponding output components. These shapes may be partially
// specified, using `-1` to indicate that a particular dimension should be
// padded to the maximum size of all batch elements.
//	padding_values: A list of scalars containing the padding value to use for
// each of the outputs.
//
func PaddedBatchDataset(scope *Scope, input_dataset tf.Output, batch_size tf.Output, padded_shapes []tf.Output, padding_values []tf.Output, output_shapes []tf.Shape) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"output_shapes": output_shapes}
	opspec := tf.OpSpec{
		Type: "PaddedBatchDataset",
		Input: []tf.Input{
			input_dataset, batch_size, tf.OutputList(padded_shapes), tf.OutputList(padding_values),
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// ResourceApplyMomentumAttr is an optional argument to ResourceApplyMomentum.
type ResourceApplyMomentumAttr func(optionalAttr)

// ResourceApplyMomentumUseLocking sets the optional use_locking attribute to value.
//
// value: If `True`, updating of the var and accum tensors will be protected
// by a lock; otherwise the behavior is undefined, but may exhibit less
// contention.
// If not specified, defaults to false
func ResourceApplyMomentumUseLocking(value bool) ResourceApplyMomentumAttr {
	return func(m optionalAttr) {
		m["use_locking"] = value
	}
}

// ResourceApplyMomentumUseNesterov sets the optional use_nesterov attribute to value.
//
// value: If `True`, the tensor passed to compute grad will be
// var - lr * momentum * accum, so in the end, the var you get is actually
// var - lr * momentum * accum.
// If not specified, defaults to false
func ResourceApplyMomentumUseNesterov(value bool) ResourceApplyMomentumAttr {
	return func(m optionalAttr) {
		m["use_nesterov"] = value
	}
}

// Update '*var' according to the momentum scheme.
//
// Set use_nesterov = True if you want to use Nesterov momentum.
//
// accum = accum * momentum + grad
// var -= lr * accum
//
// Arguments:
//	var_: Should be from a Variable().
//	accum: Should be from a Variable().
//	lr: Scaling factor. Must be a scalar.
//	grad: The gradient.
//	momentum: Momentum. Must be a scalar.
//
// Returns the created operation.
func ResourceApplyMomentum(scope *Scope, var_ tf.Output, accum tf.Output, lr tf.Output, grad tf.Output, momentum tf.Output, optional ...ResourceApplyMomentumAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ResourceApplyMomentum",
		Input: []tf.Input{
			var_, accum, lr, grad, momentum,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}

// MaxPoolGradGradAttr is an optional argument to MaxPoolGradGrad.
type MaxPoolGradGradAttr func(optionalAttr)

// MaxPoolGradGradDataFormat sets the optional data_format attribute to value.
//
// value: Specify the data format of the input and output data. With the
// default format "NHWC", the data is stored in the order of:
//     [batch, in_height, in_width, in_channels].
// Alternatively, the format could be "NCHW", the data storage order of:
//     [batch, in_channels, in_height, in_width].
// If not specified, defaults to "NHWC"
func MaxPoolGradGradDataFormat(value string) MaxPoolGradGradAttr {
	return func(m optionalAttr) {
		m["data_format"] = value
	}
}

// Computes second-order gradients of the maxpooling function.
//
// Arguments:
//	orig_input: The original input tensor.
//	orig_output: The original output tensor.
//	grad: 4-D.  Gradients of gradients w.r.t. the input of `max_pool`.
//	ksize: The size of the window for each dimension of the input tensor.
//	strides: The stride of the sliding window for each dimension of the
// input tensor.
//	padding: The type of padding algorithm to use.
//
// Returns Gradients of gradients w.r.t. the input to `max_pool`.
func MaxPoolGradGrad(scope *Scope, orig_input tf.Output, orig_output tf.Output, grad tf.Output, ksize []int64, strides []int64, padding string, optional ...MaxPoolGradGradAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "MaxPoolGradGrad",
		Input: []tf.Input{
			orig_input, orig_output, grad,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Returns the last element of the input list as well as a list with all but that element.
//
// Fails if the list is empty.
//
// input_handle: the input list
// tensor: the withdrawn last element of the list
// element_dtype: the type of elements in the list
// element_shape: the shape of the output tensor
func TensorListPopBack(scope *Scope, input_handle tf.Output, element_shape tf.Output, element_dtype tf.DataType) (output_handle tf.Output, tensor tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"element_dtype": element_dtype}
	opspec := tf.OpSpec{
		Type: "TensorListPopBack",
		Input: []tf.Input{
			input_handle, element_shape,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1)
}

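// A sketch of pairing TensorListPopBack with EmptyTensorList and
// TensorListPushBack (both defined elsewhere in this package); whether these
// ops are all available together depends on the build, so treat this as an
// assumption. Values are illustrative.
//
// ```
// root := op.NewScope()
// elemShape := op.Const(root.SubScope("shape"), []int32{}) // scalar elements
// maxElems := op.Const(root.SubScope("max"), int32(-1))    // unbounded
// list := op.EmptyTensorList(root, elemShape, maxElems, tf.Float)
// list = op.TensorListPushBack(root.SubScope("push"), list, op.Const(root.SubScope("c"), float32(42)))
// rest, last := op.TensorListPopBack(root, list, elemShape, tf.Float)
// // last evaluates to 42; rest is the list with that element removed.
// _ = rest
// ```
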
// Determine the script codes of a given tensor of Unicode integer code points.
//
// This operation converts Unicode code points to script codes corresponding to
// each code point. Script codes correspond to International Components for
// Unicode (ICU) UScriptCode values. See http://icu-project.org/apiref/icu4c/uscript_8h.html.
// Returns -1 (USCRIPT_INVALID_CODE) for invalid codepoints. Output shape will
// match input shape.
//
// Arguments:
//	input: A Tensor of int32 Unicode code points.
//
// Returns A Tensor of int32 script codes corresponding to each input code point.
func UnicodeScript(scope *Scope, input tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "UnicodeScript",
		Input: []tf.Input{
			input,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Creates a sequence of numbers.
//
// This operation creates a sequence of numbers that begins at `start` and
// extends by increments of `delta` up to but not including `limit`.
//
// For example:
//
// ```
// # 'start' is 3
// # 'limit' is 18
// # 'delta' is 3
// tf.range(start, limit, delta) ==> [3, 6, 9, 12, 15]
// ```
//
// Arguments:
//	start: 0-D (scalar). First entry in the sequence.
//	limit: 0-D (scalar). Upper limit of sequence, exclusive.
//	delta: 0-D (scalar). Optional. Default is 1. Number that increments `start`.
//
// Returns 1-D.
func Range(scope *Scope, start tf.Output, limit tf.Output, delta tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Range",
		Input: []tf.Input{
			start, limit, delta,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

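// The Go equivalent of the example above:
//
// ```
// root := op.NewScope()
// start := op.Const(root.SubScope("start"), int32(3))
// limit := op.Const(root.SubScope("limit"), int32(18))
// delta := op.Const(root.SubScope("delta"), int32(3))
// r := op.Range(root, start, limit, delta)
// // r evaluates to [3, 6, 9, 12, 15].
// ```
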
// MaxPoolGradGradWithArgmaxAttr is an optional argument to MaxPoolGradGradWithArgmax.
type MaxPoolGradGradWithArgmaxAttr func(optionalAttr)

// MaxPoolGradGradWithArgmaxIncludeBatchInIndex sets the optional include_batch_in_index attribute to value.
//
// value: Whether to include batch dimension in flattened index of `argmax`.
// If not specified, defaults to false
func MaxPoolGradGradWithArgmaxIncludeBatchInIndex(value bool) MaxPoolGradGradWithArgmaxAttr {
	return func(m optionalAttr) {
		m["include_batch_in_index"] = value
	}
}

// Computes second-order gradients of the maxpooling function.
//
// Arguments:
//	input: The original input.
//	grad: 4-D with shape `[batch, height, width, channels]`.  Gradients w.r.t. the
// input of `max_pool`.
//	argmax: The indices of the maximum values chosen for each output of `max_pool`.
//	ksize: The size of the window for each dimension of the input tensor.
//	strides: The stride of the sliding window for each dimension of the
// input tensor.
//	padding: The type of padding algorithm to use.
//
// Returns Gradients of gradients w.r.t. the input of `max_pool`.
func MaxPoolGradGradWithArgmax(scope *Scope, input tf.Output, grad tf.Output, argmax tf.Output, ksize []int64, strides []int64, padding string, optional ...MaxPoolGradGradWithArgmaxAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "MaxPoolGradGradWithArgmax",
		Input: []tf.Input{
			input, grad, argmax,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Return a slice from 'input'.
//
// The output tensor is a tensor with dimensions described by 'size'
// whose values are extracted from 'input' starting at the offsets in
// 'begin'.
//
// *Requirements*:
//   0 <= begin[i] <= begin[i] + size[i] <= Di  for i in [0, n)
//
// Arguments:
//
//	begin: begin[i] specifies the offset into the 'i'th dimension of
// 'input' to slice from.
//	size: size[i] specifies the number of elements of the 'i'th dimension
// of 'input' to slice. If size[i] is -1, all remaining elements in dimension
// i are included in the slice (i.e. this is equivalent to setting
// size[i] = input.dim_size(i) - begin[i]).
func Slice(scope *Scope, input tf.Output, begin tf.Output, size tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Slice",
		Input: []tf.Input{
			input, begin, size,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

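// A sketch of Slice taking both rows and all columns from index 1 onward,
// using -1 in `size` as described above. Values are illustrative.
//
// ```
// root := op.NewScope()
// input := op.Const(root.SubScope("in"), [][]int32{{1, 2, 3}, {4, 5, 6}})
// begin := op.Const(root.SubScope("begin"), []int32{0, 1})
// size := op.Const(root.SubScope("size"), []int32{2, -1})
// out := op.Slice(root, input, begin, size)
// // out evaluates to [[2, 3], [5, 6]].
// ```
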
// Compute the Hurwitz zeta function \\(\zeta(x, q)\\).
//
// The Hurwitz zeta function is defined as:
//
// \\(\zeta(x, q) = \sum_{n=0}^{\infty} (q + n)^{-x}\\)
func Zeta(scope *Scope, x tf.Output, q tf.Output) (z tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Zeta",
		Input: []tf.Input{
			x, q,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Returns the cardinality of `input_dataset`.
//
// Arguments:
//	input_dataset: A variant tensor representing the dataset to return cardinality for.
//
// Returns The cardinality of `input_dataset`. Named constants are used to represent
// infinite and unknown cardinality.
func ExperimentalDatasetCardinality(scope *Scope, input_dataset tf.Output) (cardinality tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "ExperimentalDatasetCardinality",
		Input: []tf.Input{
			input_dataset,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// TakeManySparseFromTensorsMapAttr is an optional argument to TakeManySparseFromTensorsMap.
type TakeManySparseFromTensorsMapAttr func(optionalAttr)

// TakeManySparseFromTensorsMapContainer sets the optional container attribute to value.
//
// value: The container name for the `SparseTensorsMap` read by this op.
// If not specified, defaults to ""
func TakeManySparseFromTensorsMapContainer(value string) TakeManySparseFromTensorsMapAttr {
	return func(m optionalAttr) {
		m["container"] = value
	}
}

// TakeManySparseFromTensorsMapSharedName sets the optional shared_name attribute to value.
//
// value: The shared name for the `SparseTensorsMap` read by this op.
// It should not be blank; rather the `shared_name` or unique Operation name
// of the Op that created the original `SparseTensorsMap` should be used.
// If not specified, defaults to ""
func TakeManySparseFromTensorsMapSharedName(value string) TakeManySparseFromTensorsMapAttr {
	return func(m optionalAttr) {
		m["shared_name"] = value
	}
}

// Read `SparseTensors` from a `SparseTensorsMap` and concatenate them.
//
// The input `sparse_handles` must be an `int64` matrix of shape `[N, 1]` where
// `N` is the minibatch size and the rows correspond to the output handles of
// `AddSparseToTensorsMap` or `AddManySparseToTensorsMap`.  The ranks of the
// original `SparseTensor` objects that went into the given input ops must all
// match.  When the final `SparseTensor` is created, it has rank one
// higher than the ranks of the incoming `SparseTensor` objects
// (they have been concatenated along a new row dimension on the left).
//
// The output `SparseTensor` object's shape values for all dimensions but the
// first are the max across the input `SparseTensor` objects' shape values
// for the corresponding dimensions.  Its first shape value is `N`, the minibatch
// size.
//
// The input `SparseTensor` objects' indices are assumed ordered in
// standard lexicographic order.  If this is not the case, after this
// step run `SparseReorder` to restore index ordering.
//
// For example, if the handles represent an input, which is a `[2, 3]` matrix
// representing two original `SparseTensor` objects:
//
// ```
//     index = [ 0]
//             [10]
//             [20]
//     values = [1, 2, 3]
//     shape = [50]
// ```
//
// and
//
// ```
//     index = [ 2]
//             [10]
//     values = [4, 5]
//     shape = [30]
// ```
//
// then the final `SparseTensor` will be:
//
// ```
//     index = [0  0]
//             [0 10]
//             [0 20]
//             [1  2]
//             [1 10]
//     values = [1, 2, 3, 4, 5]
//     shape = [2 50]
// ```
//
// Arguments:
//	sparse_handles: 1-D, The `N` serialized `SparseTensor` objects.
// Shape: `[N]`.
//	dtype: The `dtype` of the `SparseTensor` objects stored in the
// `SparseTensorsMap`.
//
// Returns:
//	sparse_indices: 2-D.  The `indices` of the minibatch `SparseTensor`.
//	sparse_values: 1-D.  The `values` of the minibatch `SparseTensor`.
//	sparse_shape: 1-D.  The `shape` of the minibatch `SparseTensor`.
func TakeManySparseFromTensorsMap(scope *Scope, sparse_handles tf.Output, dtype tf.DataType, optional ...TakeManySparseFromTensorsMapAttr) (sparse_indices tf.Output, sparse_values tf.Output, sparse_shape tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"dtype": dtype}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "TakeManySparseFromTensorsMap",
		Input: []tf.Input{
			sparse_handles,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}

// NonDeterministicIntsAttr is an optional argument to NonDeterministicInts.
type NonDeterministicIntsAttr func(optionalAttr)

// NonDeterministicIntsDtype sets the optional dtype attribute to value.
//
// value: The type of the output.
// If not specified, defaults to DT_INT64
func NonDeterministicIntsDtype(value tf.DataType) NonDeterministicIntsAttr {
	return func(m optionalAttr) {
		m["dtype"] = value
	}
}

// Non-deterministically generates some integers.
//
// This op may use some OS-provided source of non-determinism (e.g. an RNG), so each execution will give different results.
//
// Arguments:
//	shape: The shape of the output tensor.
//
// Returns Non-deterministic integer values with specified shape.
func NonDeterministicInts(scope *Scope, shape tf.Output, optional ...NonDeterministicIntsAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "NonDeterministicInts",
		Input: []tf.Input{
			shape,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// ResourceSparseApplyKerasMomentumAttr is an optional argument to ResourceSparseApplyKerasMomentum.
type ResourceSparseApplyKerasMomentumAttr func(optionalAttr)

// ResourceSparseApplyKerasMomentumUseLocking sets the optional use_locking attribute to value.
//
// value: If `True`, updating of the var and accum tensors will be protected
// by a lock; otherwise the behavior is undefined, but may exhibit less
// contention.
// If not specified, defaults to false
func ResourceSparseApplyKerasMomentumUseLocking(value bool) ResourceSparseApplyKerasMomentumAttr {
	return func(m optionalAttr) {
		m["use_locking"] = value
	}
}

// ResourceSparseApplyKerasMomentumUseNesterov sets the optional use_nesterov attribute to value.
//
// value: If `True`, the tensor passed to compute grad will be
// var + momentum * accum, so in the end, the var you get is actually
// var + momentum * accum.
// If not specified, defaults to false
func ResourceSparseApplyKerasMomentumUseNesterov(value bool) ResourceSparseApplyKerasMomentumAttr {
	return func(m optionalAttr) {
		m["use_nesterov"] = value
	}
}

// Update relevant entries in '*var' and '*accum' according to the momentum scheme.
//
// Set use_nesterov = True if you want to use Nesterov momentum.
//
// That is, for rows we have grad for, we update var and accum as follows:
//
// accum = accum * momentum - lr * grad
// var += accum
//
// Arguments:
//	var_: Should be from a Variable().
//	accum: Should be from a Variable().
//	lr: Learning rate. Must be a scalar.
//	grad: The gradient.
//	indices: A vector of indices into the first dimension of var and accum.
//	momentum: Momentum. Must be a scalar.
//
// Returns the created operation.
func ResourceSparseApplyKerasMomentum(scope *Scope, var_ tf.Output, accum tf.Output, lr tf.Output, grad tf.Output, indices tf.Output, momentum tf.Output, optional ...ResourceSparseApplyKerasMomentumAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ResourceSparseApplyKerasMomentum",
		Input: []tf.Input{
			var_, accum, lr, grad, indices, momentum,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}

// ResourceApplyAdamWithAmsgradAttr is an optional argument to ResourceApplyAdamWithAmsgrad.
type ResourceApplyAdamWithAmsgradAttr func(optionalAttr)

// ResourceApplyAdamWithAmsgradUseLocking sets the optional use_locking attribute to value.
//
// value: If `True`, updating of the var, m, and v tensors will be protected
// by a lock; otherwise the behavior is undefined, but may exhibit less
// contention.
// If not specified, defaults to false
func ResourceApplyAdamWithAmsgradUseLocking(value bool) ResourceApplyAdamWithAmsgradAttr {
	return func(m optionalAttr) {
		m["use_locking"] = value
	}
}

// Update '*var' according to the Adam algorithm.
//
// $$lr_t := \text{learning\_rate} * \sqrt{1 - beta_2^t} / (1 - beta_1^t)$$
// $$m_t := beta_1 * m_{t-1} + (1 - beta_1) * g$$
// $$v_t := beta_2 * v_{t-1} + (1 - beta_2) * g * g$$
// $$vhat_t := \max(vhat_{t-1}, v_t)$$
// $$variable := variable - lr_t * m_t / (\sqrt{vhat_t} + \epsilon)$$
//
// Arguments:
//	var_: Should be from a Variable().
//	m: Should be from a Variable().
//	v: Should be from a Variable().
//	vhat: Should be from a Variable().
//	beta1_power: Must be a scalar.
//	beta2_power: Must be a scalar.
//	lr: Scaling factor. Must be a scalar.
//	beta1: Momentum factor. Must be a scalar.
//	beta2: Momentum factor. Must be a scalar.
//	epsilon: Ridge term. Must be a scalar.
//	grad: The gradient.
//
// Returns the created operation.
func ResourceApplyAdamWithAmsgrad(scope *Scope, var_ tf.Output, m tf.Output, v tf.Output, vhat tf.Output, beta1_power tf.Output, beta2_power tf.Output, lr tf.Output, beta1 tf.Output, beta2 tf.Output, epsilon tf.Output, grad tf.Output, optional ...ResourceApplyAdamWithAmsgradAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ResourceApplyAdamWithAmsgrad",
		Input: []tf.Input{
			var_, m, v, vhat, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}

// MapUnstageNoKeyAttr is an optional argument to MapUnstageNoKey.
type MapUnstageNoKeyAttr func(optionalAttr)

// MapUnstageNoKeyCapacity sets the optional capacity attribute to value.
// If not specified, defaults to 0
//
// REQUIRES: value >= 0
func MapUnstageNoKeyCapacity(value int64) MapUnstageNoKeyAttr {
	return func(m optionalAttr) {
		m["capacity"] = value
	}
}

// MapUnstageNoKeyMemoryLimit sets the optional memory_limit attribute to value.
// If not specified, defaults to 0
//
// REQUIRES: value >= 0
func MapUnstageNoKeyMemoryLimit(value int64) MapUnstageNoKeyAttr {
	return func(m optionalAttr) {
		m["memory_limit"] = value
	}
}

// MapUnstageNoKeyContainer sets the optional container attribute to value.
// If not specified, defaults to ""
func MapUnstageNoKeyContainer(value string) MapUnstageNoKeyAttr {
	return func(m optionalAttr) {
		m["container"] = value
	}
}

// MapUnstageNoKeySharedName sets the optional shared_name attribute to value.
// If not specified, defaults to ""
func MapUnstageNoKeySharedName(value string) MapUnstageNoKeyAttr {
	return func(m optionalAttr) {
		m["shared_name"] = value
	}
}

// Op removes and returns a random (key, value) pair from the underlying
// container. If the underlying container does not contain elements, the
// op will block until it does.
func MapUnstageNoKey(scope *Scope, indices tf.Output, dtypes []tf.DataType, optional ...MapUnstageNoKeyAttr) (key tf.Output, values []tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"dtypes": dtypes}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "MapUnstageNoKey",
		Input: []tf.Input{
			indices,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	if scope.Err() != nil {
		return
	}
	var idx int
	var err error
	key = op.Output(idx)
	idx++ // step past the scalar `key` output so the `values` list starts at the next output
	if values, idx, err = makeOutputList(op, idx, "values"); err != nil {
		scope.UpdateErr("MapUnstageNoKey", err)
		return
	}
	return key, values
}

// HashTableV2Attr is an optional argument to HashTableV2.
type HashTableV2Attr func(optionalAttr)

// HashTableV2Container sets the optional container attribute to value.
//
// value: If non-empty, this table is placed in the given container.
// Otherwise, a default container is used.
// If not specified, defaults to ""
func HashTableV2Container(value string) HashTableV2Attr {
	return func(m optionalAttr) {
		m["container"] = value
	}
}

// HashTableV2SharedName sets the optional shared_name attribute to value.
//
// value: If non-empty, this table is shared under the given name across
// multiple sessions.
// If not specified, defaults to ""
func HashTableV2SharedName(value string) HashTableV2Attr {
	return func(m optionalAttr) {
		m["shared_name"] = value
	}
}

// HashTableV2UseNodeNameSharing sets the optional use_node_name_sharing attribute to value.
//
// value: If true and shared_name is empty, the table is shared
// using the node name.
// If not specified, defaults to false
func HashTableV2UseNodeNameSharing(value bool) HashTableV2Attr {
	return func(m optionalAttr) {
		m["use_node_name_sharing"] = value
	}
}

// Creates a non-initialized hash table.
//
// This op creates a hash table, specifying the type of its keys and values.
// Before using the table you will have to initialize it.  After initialization the
// table will be immutable.
//
// Arguments:
//	key_dtype: Type of the table keys.
//	value_dtype: Type of the table values.
//
// Returns Handle to a table.
func HashTableV2(scope *Scope, key_dtype tf.DataType, value_dtype tf.DataType, optional ...HashTableV2Attr) (table_handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"key_dtype": key_dtype, "value_dtype": value_dtype}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "HashTableV2",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

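// A sketch of creating, initializing, and querying a table; it assumes the
// InitializeTableV2 and LookupTableFindV2 wrappers defined elsewhere in this
// package. Keys and values are illustrative.
//
// ```
// root := op.NewScope()
// table := op.HashTableV2(root, tf.String, tf.Int64,
// 	op.HashTableV2SharedName("colors"))
// keys := op.Const(root.SubScope("keys"), []string{"red", "green"})
// vals := op.Const(root.SubScope("vals"), []int64{0, 1})
// init := op.InitializeTableV2(root, table, keys, vals)
// found := op.LookupTableFindV2(root, table,
// 	op.Const(root.SubScope("q"), []string{"green", "blue"}),
// 	op.Const(root.SubScope("default"), int64(-1)))
// // After running init once, found evaluates to [1, -1].
// _ = init
// ```
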
// RetrieveTPUEmbeddingMomentumParametersGradAccumDebugAttr is an optional argument to RetrieveTPUEmbeddingMomentumParametersGradAccumDebug.
type RetrieveTPUEmbeddingMomentumParametersGradAccumDebugAttr func(optionalAttr)

// RetrieveTPUEmbeddingMomentumParametersGradAccumDebugTableId sets the optional table_id attribute to value.
// If not specified, defaults to -1
//
// REQUIRES: value >= -1
func RetrieveTPUEmbeddingMomentumParametersGradAccumDebugTableId(value int64) RetrieveTPUEmbeddingMomentumParametersGradAccumDebugAttr {
	return func(m optionalAttr) {
		m["table_id"] = value
	}
}

// RetrieveTPUEmbeddingMomentumParametersGradAccumDebugTableName sets the optional table_name attribute to value.
// If not specified, defaults to ""
func RetrieveTPUEmbeddingMomentumParametersGradAccumDebugTableName(value string) RetrieveTPUEmbeddingMomentumParametersGradAccumDebugAttr {
	return func(m optionalAttr) {
		m["table_name"] = value
	}
}

// Retrieve Momentum embedding parameters with debug support.
//
// An op that retrieves optimization parameters from embedding to host
// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
// the correct embedding table configuration. For example, this op is
// used to retrieve updated parameters before saving a checkpoint.
//
// Returns:
//	parameters: Parameter parameters updated by the Momentum optimization algorithm.
//	momenta: Parameter momenta updated by the Momentum optimization algorithm.
//	gradient_accumulators: Parameter gradient_accumulators updated by the Momentum optimization algorithm.
func RetrieveTPUEmbeddingMomentumParametersGradAccumDebug(scope *Scope, num_shards int64, shard_id int64, optional ...RetrieveTPUEmbeddingMomentumParametersGradAccumDebugAttr) (parameters tf.Output, momenta tf.Output, gradient_accumulators tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "RetrieveTPUEmbeddingMomentumParametersGradAccumDebug",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}

// Enqueue a Tensor on the computation outfeed.
//
// Arguments:
//	input: A tensor that will be inserted into the outfeed queue.
//
// Returns the created operation.
func OutfeedEnqueue(scope *Scope, input tf.Output) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "OutfeedEnqueue",
		Input: []tf.Input{
			input,
		},
	}
	return scope.AddOperation(opspec)
}

// Outputs a `Summary` protocol buffer with a histogram.
//
// The generated
// [`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)
// has one summary value containing a histogram for `values`.
//
// This op reports an `InvalidArgument` error if any value is not finite.
//
// Arguments:
//	tag: Scalar.  Tag to use for the `Summary.Value`.
//	values: Any shape. Values to use to build the histogram.
//
// Returns Scalar. Serialized `Summary` protocol buffer.
func HistogramSummary(scope *Scope, tag tf.Output, values tf.Output) (summary tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "HistogramSummary",
		Input: []tf.Input{
			tag, values,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// MutableDenseHashTableV2Attr is an optional argument to MutableDenseHashTableV2.
type MutableDenseHashTableV2Attr func(optionalAttr)

// MutableDenseHashTableV2Container sets the optional container attribute to value.
//
// value: If non-empty, this table is placed in the given container.
// Otherwise, a default container is used.
// If not specified, defaults to ""
func MutableDenseHashTableV2Container(value string) MutableDenseHashTableV2Attr {
	return func(m optionalAttr) {
		m["container"] = value
	}
}

// MutableDenseHashTableV2SharedName sets the optional shared_name attribute to value.
//
// value: If non-empty, this table is shared under the given name across
// multiple sessions.
// If not specified, defaults to ""
func MutableDenseHashTableV2SharedName(value string) MutableDenseHashTableV2Attr {
	return func(m optionalAttr) {
		m["shared_name"] = value
	}
}

// MutableDenseHashTableV2UseNodeNameSharing sets the optional use_node_name_sharing attribute to value.
// If not specified, defaults to false
func MutableDenseHashTableV2UseNodeNameSharing(value bool) MutableDenseHashTableV2Attr {
	return func(m optionalAttr) {
		m["use_node_name_sharing"] = value
	}
}

// MutableDenseHashTableV2ValueShape sets the optional value_shape attribute to value.
//
// value: The shape of each value.
// If not specified, defaults to the empty (scalar) shape
func MutableDenseHashTableV2ValueShape(value tf.Shape) MutableDenseHashTableV2Attr {
	return func(m optionalAttr) {
		m["value_shape"] = value
	}
}

// MutableDenseHashTableV2InitialNumBuckets sets the optional initial_num_buckets attribute to value.
//
// value: The initial number of hash table buckets. Must be a power
// of 2.
// If not specified, defaults to 131072
func MutableDenseHashTableV2InitialNumBuckets(value int64) MutableDenseHashTableV2Attr {
	return func(m optionalAttr) {
		m["initial_num_buckets"] = value
	}
}

// MutableDenseHashTableV2MaxLoadFactor sets the optional max_load_factor attribute to value.
//
// value: The maximum ratio between number of entries and number of
// buckets before growing the table. Must be between 0 and 1.
// If not specified, defaults to 0.8
func MutableDenseHashTableV2MaxLoadFactor(value float32) MutableDenseHashTableV2Attr {
	return func(m optionalAttr) {
		m["max_load_factor"] = value
	}
}

// Creates an empty hash table that uses tensors as the backing store.
//
// It uses "open addressing" with quadratic reprobing to resolve
// collisions.
//
// This op creates a mutable hash table, specifying the type of its keys and
// values. Each value must be a scalar. Data can be inserted into the table using
// the insert operations. It does not support the initialization operation.
//
// Arguments:
//	empty_key: The key used to represent empty key buckets internally. Must not
// be used in insert or lookup operations.
//	deleted_key: The key used to represent deleted key buckets internally. Must not
// be used in insert or lookup operations.
//	value_dtype: Type of the table values.
//
// Returns Handle to a table.
func MutableDenseHashTableV2(scope *Scope, empty_key tf.Output, deleted_key tf.Output, value_dtype tf.DataType, optional ...MutableDenseHashTableV2Attr) (table_handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"value_dtype": value_dtype}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "MutableDenseHashTableV2",
		Input: []tf.Input{
			empty_key, deleted_key,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Deprecated. Use TensorArraySplitV3
//
// DEPRECATED at GraphDef version 26: Use TensorArraySplitV3
func TensorArraySplitV2(scope *Scope, handle tf.Output, value tf.Output, lengths tf.Output, flow_in tf.Output) (flow_out tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "TensorArraySplitV2",
		Input: []tf.Input{
			handle, value, lengths, flow_in,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Reshapes a SparseTensor to represent values in a new dense shape.
//
// This operation has the same semantics as reshape on the represented dense
// tensor.  The `input_indices` are recomputed based on the requested `new_shape`.
//
// If one component of `new_shape` is the special value -1, the size of that
// dimension is computed so that the total dense size remains constant.  At
// most one component of `new_shape` can be -1.  The number of dense elements
// implied by `new_shape` must be the same as the number of dense elements
// originally implied by `input_shape`.
//
// Reshaping does not affect the order of values in the SparseTensor.
//
// If the input tensor has rank `R_in` and `N` non-empty values, and `new_shape`
// has length `R_out`, then `input_indices` has shape `[N, R_in]`,
// `input_shape` has length `R_in`, `output_indices` has shape `[N, R_out]`, and
// `output_shape` has length `R_out`.
//
// Arguments:
//	input_indices: 2-D.  `N x R_in` matrix with the indices of non-empty values in a
// SparseTensor.
//	input_shape: 1-D.  `R_in` vector with the input SparseTensor's dense shape.
//	new_shape: 1-D.  `R_out` vector with the requested new dense shape.
//
// Returns:
//	output_indices: 2-D.  `N x R_out` matrix with the updated indices of non-empty
// values in the output SparseTensor.
//	output_shape: 1-D.  `R_out` vector with the full dense shape of the output
// SparseTensor.  This is the same as `new_shape` but with any -1 dimensions
// filled in.
func SparseReshape(scope *Scope, input_indices tf.Output, input_shape tf.Output, new_shape tf.Output) (output_indices tf.Output, output_shape tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "SparseReshape",
		Input: []tf.Input{
			input_indices, input_shape, new_shape,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1)
}

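// A worked sketch: a SparseTensor of dense shape [2, 3] with non-empty
// entries at (0, 0) and (1, 2) (linear positions 0 and 5) reshaped to
// [3, -1]. Values are illustrative.
//
// ```
// root := op.NewScope()
// indices := op.Const(root.SubScope("idx"), [][]int64{{0, 0}, {1, 2}})
// shape := op.Const(root.SubScope("shape"), []int64{2, 3})
// newShape := op.Const(root.SubScope("new"), []int64{3, -1})
// outIdx, outShape := op.SparseReshape(root, indices, shape, newShape)
// // outShape evaluates to [3, 2]; linear positions 0 and 5 map to
// // outIdx [[0, 0], [2, 1]].
// _, _ = outIdx, outShape
// ```
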
24932// Computes the product along segments of a tensor.
24933//
24934// Read
24935// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)
24936// for an explanation of segments.
24937//
24938// Computes a tensor such that
24939// \\(output_i = \prod_j data_j\\) where the product is over `j` such
24940// that `segment_ids[j] == i`.
24941//
24942// If the product is empty for a given segment ID `i`, `output[i] = 1`.
24943//
24944// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
24945// <img style="width:100%" src="https://www.tensorflow.org/images/SegmentProd.png" alt>
24946// </div>
24947//
24948// For example:
24949//
24950// ```
24951// c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]])
24952// tf.segment_prod(c, tf.constant([0, 0, 1]))
24953// # ==> [[4, 6, 6, 4],
24954// #      [5, 6, 7, 8]]
24955// ```
24956//
24957//
24958// Arguments:
24959//
24960//	segment_ids: A 1-D tensor whose size is equal to the size of `data`'s
24961// first dimension.  Values should be sorted and can be repeated.
24962//
24963// Returns Has same shape as data, except for dimension 0 which
24964// has size `k`, the number of segments.
24965func SegmentProd(scope *Scope, data tf.Output, segment_ids tf.Output) (output tf.Output) {
24966	if scope.Err() != nil {
24967		return
24968	}
24969	opspec := tf.OpSpec{
24970		Type: "SegmentProd",
24971		Input: []tf.Input{
24972			data, segment_ids,
24973		},
24974	}
24975	op := scope.AddOperation(opspec)
24976	return op.Output(0)
24977}
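
// exampleSegmentProd is an illustrative, hand-written sketch (not machine
// generated) mirroring the doc example above; the constants are made up.
// With segment_ids [0, 0, 1], rows 0 and 1 are multiplied elementwise and
// row 2 passes through, giving [[4, 6, 6, 4], [5, 6, 7, 8]].
func exampleSegmentProd(scope *Scope) tf.Output {
	c := Const(scope, [][]int32{{1, 2, 3, 4}, {4, 3, 2, 1}, {5, 6, 7, 8}})
	ids := Const(scope, []int32{0, 0, 1})
	return SegmentProd(scope, c, ids)
}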

// RetrieveTPUEmbeddingADAMParametersGradAccumDebugAttr is an optional argument to RetrieveTPUEmbeddingADAMParametersGradAccumDebug.
type RetrieveTPUEmbeddingADAMParametersGradAccumDebugAttr func(optionalAttr)

// RetrieveTPUEmbeddingADAMParametersGradAccumDebugTableId sets the optional table_id attribute to value.
// If not specified, defaults to -1
//
// REQUIRES: value >= -1
func RetrieveTPUEmbeddingADAMParametersGradAccumDebugTableId(value int64) RetrieveTPUEmbeddingADAMParametersGradAccumDebugAttr {
	return func(m optionalAttr) {
		m["table_id"] = value
	}
}

// RetrieveTPUEmbeddingADAMParametersGradAccumDebugTableName sets the optional table_name attribute to value.
// If not specified, defaults to ""
func RetrieveTPUEmbeddingADAMParametersGradAccumDebugTableName(value string) RetrieveTPUEmbeddingADAMParametersGradAccumDebugAttr {
	return func(m optionalAttr) {
		m["table_name"] = value
	}
}

// Retrieve ADAM embedding parameters with debug support.
//
// An op that retrieves optimization parameters from embedding to host
// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
// the correct embedding table configuration. For example, this op is
// used to retrieve updated parameters before saving a checkpoint.
//
// Returns:
//	parameters: Parameter parameters updated by the ADAM optimization algorithm.
//	momenta: Parameter momenta updated by the ADAM optimization algorithm.
//	velocities: Parameter velocities updated by the ADAM optimization algorithm.
//	gradient_accumulators: Parameter gradient_accumulators updated by the ADAM optimization algorithm.
func RetrieveTPUEmbeddingADAMParametersGradAccumDebug(scope *Scope, num_shards int64, shard_id int64, optional ...RetrieveTPUEmbeddingADAMParametersGradAccumDebugAttr) (parameters tf.Output, momenta tf.Output, velocities tf.Output, gradient_accumulators tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "RetrieveTPUEmbeddingADAMParametersGradAccumDebug",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2), op.Output(3)
}

// CudnnRNNAttr is an optional argument to CudnnRNN.
type CudnnRNNAttr func(optionalAttr)

// CudnnRNNRnnMode sets the optional rnn_mode attribute to value.
// If not specified, defaults to "lstm"
func CudnnRNNRnnMode(value string) CudnnRNNAttr {
	return func(m optionalAttr) {
		m["rnn_mode"] = value
	}
}

// CudnnRNNInputMode sets the optional input_mode attribute to value.
// If not specified, defaults to "linear_input"
func CudnnRNNInputMode(value string) CudnnRNNAttr {
	return func(m optionalAttr) {
		m["input_mode"] = value
	}
}

// CudnnRNNDirection sets the optional direction attribute to value.
// If not specified, defaults to "unidirectional"
func CudnnRNNDirection(value string) CudnnRNNAttr {
	return func(m optionalAttr) {
		m["direction"] = value
	}
}

// CudnnRNNDropout sets the optional dropout attribute to value.
// If not specified, defaults to 0
func CudnnRNNDropout(value float32) CudnnRNNAttr {
	return func(m optionalAttr) {
		m["dropout"] = value
	}
}

// CudnnRNNSeed sets the optional seed attribute to value.
// If not specified, defaults to 0
func CudnnRNNSeed(value int64) CudnnRNNAttr {
	return func(m optionalAttr) {
		m["seed"] = value
	}
}

// CudnnRNNSeed2 sets the optional seed2 attribute to value.
// If not specified, defaults to 0
func CudnnRNNSeed2(value int64) CudnnRNNAttr {
	return func(m optionalAttr) {
		m["seed2"] = value
	}
}

// CudnnRNNIsTraining sets the optional is_training attribute to value.
// If not specified, defaults to true
func CudnnRNNIsTraining(value bool) CudnnRNNAttr {
	return func(m optionalAttr) {
		m["is_training"] = value
	}
}

// An RNN backed by cuDNN.
//
// Computes the RNN from the input and initial states, with respect to the params
// buffer.
//
// rnn_mode: Indicates the type of the RNN model.
// input_mode: Indicates whether there is a linear projection between the input and
//   the actual computation before the first layer. 'skip_input' is only allowed
//   when input_size == num_units; 'auto_select' implies 'skip_input' when
//   input_size == num_units; otherwise, it implies 'linear_input'.
// direction: Indicates whether a bidirectional model will be used. Should be
//   "unidirectional" or "bidirectional".
// dropout: Dropout probability. When set to 0., dropout is disabled.
// seed: The 1st part of a seed to initialize dropout.
// seed2: The 2nd part of a seed to initialize dropout.
// input: A 3-D tensor with the shape of [seq_length, batch_size, input_size].
// input_h: A 3-D tensor with the shape of [num_layer * dir, batch_size,
//     num_units].
// input_c: For LSTM, a 3-D tensor with the shape of
//     [num_layer * dir, batch, num_units]. For other models, it is ignored.
// params: A 1-D tensor that contains the weights and biases in an opaque layout.
//     The size must be created through CudnnRNNParamsSize, and initialized
//     separately. Note that they might not be compatible across different
//     generations. So it is a good idea to save and restore this buffer.
// output: A 3-D tensor with the shape of [seq_length, batch_size,
//     dir * num_units].
// output_h: The same shape as input_h.
// output_c: The same shape as input_c for LSTM. An empty tensor for other models.
// is_training: Indicates whether this operation is used for inference or
//   training.
// reserve_space: An opaque tensor that can be used in backprop calculation. It
//   is only produced if is_training is true.
func CudnnRNN(scope *Scope, input tf.Output, input_h tf.Output, input_c tf.Output, params tf.Output, optional ...CudnnRNNAttr) (output tf.Output, output_h tf.Output, output_c tf.Output, reserve_space tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "CudnnRNN",
		Input: []tf.Input{
			input, input_h, input_c, params,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2), op.Output(3)
}

// DecodeCompressedAttr is an optional argument to DecodeCompressed.
type DecodeCompressedAttr func(optionalAttr)

// DecodeCompressedCompressionType sets the optional compression_type attribute to value.
//
// value: A scalar containing either (i) the empty string (no
// compression), (ii) "ZLIB", or (iii) "GZIP".
// If not specified, defaults to ""
func DecodeCompressedCompressionType(value string) DecodeCompressedAttr {
	return func(m optionalAttr) {
		m["compression_type"] = value
	}
}

// Decompress strings.
//
// This op decompresses each element of the `bytes` input `Tensor`, which
// is assumed to be compressed using the given `compression_type`.
//
// The `output` is a string `Tensor` of the same shape as `bytes`,
// each element containing the decompressed data from the corresponding
// element in `bytes`.
//
// Arguments:
//	bytes: A Tensor of string which is compressed.
//
// Returns A Tensor with the same shape as input `bytes`, uncompressed
// from bytes.
func DecodeCompressed(scope *Scope, bytes tf.Output, optional ...DecodeCompressedAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "DecodeCompressed",
		Input: []tf.Input{
			bytes,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
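
// exampleDecodeCompressed is an illustrative, hand-written sketch (not
// machine generated): it decompresses a batch of GZIP-compressed strings fed
// in through a placeholder. The placeholder input and the choice of "GZIP"
// are assumptions for the sake of the example.
func exampleDecodeCompressed(scope *Scope) tf.Output {
	compressed := Placeholder(scope, tf.String) // each element holds gzip bytes
	return DecodeCompressed(scope, compressed, DecodeCompressedCompressionType("GZIP"))
}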

// RetrieveTPUEmbeddingMDLAdagradLightParametersAttr is an optional argument to RetrieveTPUEmbeddingMDLAdagradLightParameters.
type RetrieveTPUEmbeddingMDLAdagradLightParametersAttr func(optionalAttr)

// RetrieveTPUEmbeddingMDLAdagradLightParametersTableId sets the optional table_id attribute to value.
// If not specified, defaults to -1
//
// REQUIRES: value >= -1
func RetrieveTPUEmbeddingMDLAdagradLightParametersTableId(value int64) RetrieveTPUEmbeddingMDLAdagradLightParametersAttr {
	return func(m optionalAttr) {
		m["table_id"] = value
	}
}

// RetrieveTPUEmbeddingMDLAdagradLightParametersTableName sets the optional table_name attribute to value.
// If not specified, defaults to ""
func RetrieveTPUEmbeddingMDLAdagradLightParametersTableName(value string) RetrieveTPUEmbeddingMDLAdagradLightParametersAttr {
	return func(m optionalAttr) {
		m["table_name"] = value
	}
}

// Retrieve MDL Adagrad Light embedding parameters.
//
// An op that retrieves optimization parameters from embedding to host
// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
// the correct embedding table configuration. For example, this op is
// used to retrieve updated parameters before saving a checkpoint.
//
// Returns:
//	parameters: Parameter parameters updated by the MDL Adagrad Light optimization algorithm.
//	accumulators: Parameter accumulators updated by the MDL Adagrad Light optimization algorithm.
//	weights: Parameter weights updated by the MDL Adagrad Light optimization algorithm.
//	benefits: Parameter benefits updated by the MDL Adagrad Light optimization algorithm.
func RetrieveTPUEmbeddingMDLAdagradLightParameters(scope *Scope, num_shards int64, shard_id int64, optional ...RetrieveTPUEmbeddingMDLAdagradLightParametersAttr) (parameters tf.Output, accumulators tf.Output, weights tf.Output, benefits tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "RetrieveTPUEmbeddingMDLAdagradLightParameters",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2), op.Output(3)
}

// RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebugAttr is an optional argument to RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebug.
type RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebugAttr func(optionalAttr)

// RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebugTableId sets the optional table_id attribute to value.
// If not specified, defaults to -1
//
// REQUIRES: value >= -1
func RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebugTableId(value int64) RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebugAttr {
	return func(m optionalAttr) {
		m["table_id"] = value
	}
}

// RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebugTableName sets the optional table_name attribute to value.
// If not specified, defaults to ""
func RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebugTableName(value string) RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebugAttr {
	return func(m optionalAttr) {
		m["table_name"] = value
	}
}

// Retrieve Adadelta embedding parameters with debug support.
//
// An op that retrieves optimization parameters from embedding to host
// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
// the correct embedding table configuration. For example, this op is
// used to retrieve updated parameters before saving a checkpoint.
//
// Returns:
//	parameters: Parameter parameters updated by the Adadelta optimization algorithm.
//	accumulators: Parameter accumulators updated by the Adadelta optimization algorithm.
//	updates: Parameter updates updated by the Adadelta optimization algorithm.
//	gradient_accumulators: Parameter gradient_accumulators updated by the Adadelta optimization algorithm.
func RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebug(scope *Scope, num_shards int64, shard_id int64, optional ...RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebugAttr) (parameters tf.Output, accumulators tf.Output, updates tf.Output, gradient_accumulators tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebug",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2), op.Output(3)
}

// MapClearAttr is an optional argument to MapClear.
type MapClearAttr func(optionalAttr)

// MapClearCapacity sets the optional capacity attribute to value.
// If not specified, defaults to 0
//
// REQUIRES: value >= 0
func MapClearCapacity(value int64) MapClearAttr {
	return func(m optionalAttr) {
		m["capacity"] = value
	}
}

// MapClearMemoryLimit sets the optional memory_limit attribute to value.
// If not specified, defaults to 0
//
// REQUIRES: value >= 0
func MapClearMemoryLimit(value int64) MapClearAttr {
	return func(m optionalAttr) {
		m["memory_limit"] = value
	}
}

// MapClearContainer sets the optional container attribute to value.
// If not specified, defaults to ""
func MapClearContainer(value string) MapClearAttr {
	return func(m optionalAttr) {
		m["container"] = value
	}
}

// MapClearSharedName sets the optional shared_name attribute to value.
// If not specified, defaults to ""
func MapClearSharedName(value string) MapClearAttr {
	return func(m optionalAttr) {
		m["shared_name"] = value
	}
}

// Op removes all elements in the underlying container.
//
// Returns the created operation.
func MapClear(scope *Scope, dtypes []tf.DataType, optional ...MapClearAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"dtypes": dtypes}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "MapClear",

		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}
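
// exampleMapClear is an illustrative, hand-written sketch (not machine
// generated): it clears a hypothetical shared staging map holding float
// values. The shared_name "staging_map" is made up for the example.
func exampleMapClear(scope *Scope) *tf.Operation {
	return MapClear(scope, []tf.DataType{tf.Float}, MapClearSharedName("staging_map"))
}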

// DecodeCSVAttr is an optional argument to DecodeCSV.
type DecodeCSVAttr func(optionalAttr)

// DecodeCSVFieldDelim sets the optional field_delim attribute to value.
//
// value: char delimiter to separate fields in a record.
// If not specified, defaults to ","
func DecodeCSVFieldDelim(value string) DecodeCSVAttr {
	return func(m optionalAttr) {
		m["field_delim"] = value
	}
}

// DecodeCSVUseQuoteDelim sets the optional use_quote_delim attribute to value.
//
// value: If false, treats double quotation marks as regular
// characters inside of the string fields (ignoring RFC 4180, Section 2,
// Bullet 5).
// If not specified, defaults to true
func DecodeCSVUseQuoteDelim(value bool) DecodeCSVAttr {
	return func(m optionalAttr) {
		m["use_quote_delim"] = value
	}
}

// DecodeCSVNaValue sets the optional na_value attribute to value.
//
// value: Additional string to recognize as NA/NaN.
// If not specified, defaults to ""
func DecodeCSVNaValue(value string) DecodeCSVAttr {
	return func(m optionalAttr) {
		m["na_value"] = value
	}
}

// DecodeCSVSelectCols sets the optional select_cols attribute to value.
// If not specified, defaults to <>
func DecodeCSVSelectCols(value []int64) DecodeCSVAttr {
	return func(m optionalAttr) {
		m["select_cols"] = value
	}
}

// Convert CSV records to tensors. Each column maps to one tensor.
//
// RFC 4180 format is expected for the CSV records.
// (https://tools.ietf.org/html/rfc4180)
// Note that we allow leading and trailing spaces in int or float fields.
//
// Arguments:
//	records: Each string is a record/row in the csv and all records should have
// the same format.
//	record_defaults: One tensor per column of the input record, with either a
// scalar default value for that column or an empty vector if the column is
// required.
//
// Returns Each tensor will have the same shape as records.
func DecodeCSV(scope *Scope, records tf.Output, record_defaults []tf.Output, optional ...DecodeCSVAttr) (output []tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "DecodeCSV",
		Input: []tf.Input{
			records, tf.OutputList(record_defaults),
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	if scope.Err() != nil {
		return
	}
	var idx int
	var err error
	if output, idx, err = makeOutputList(op, idx, "output"); err != nil {
		scope.UpdateErr("DecodeCSV", err)
		return
	}
	return output
}
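
// exampleDecodeCSV is an illustrative, hand-written sketch (not machine
// generated): it parses made-up records with one int32 column and one string
// column, using '|' as the field delimiter. The defaults below are
// assumptions; an empty-vector default would instead mark a column as
// required.
func exampleDecodeCSV(scope *Scope) []tf.Output {
	records := Const(scope, []string{"1|a", "2|b"})
	defaults := []tf.Output{
		Const(scope, []int32{0}),   // default for column 0 (int32)
		Const(scope, []string{""}), // default for column 1 (string)
	}
	return DecodeCSV(scope, records, defaults, DecodeCSVFieldDelim("|"))
}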

// Produces the max pool of the input tensor for quantized types.
//
// Arguments:
//	input: The 4D (batch x rows x cols x depth) Tensor to MaxReduce over.
//	min_input: The float value that the lowest quantized input value represents.
//	max_input: The float value that the highest quantized input value represents.
//	ksize: The size of the window for each dimension of the input tensor.
// The length must be 4 to match the number of dimensions of the input.
//	strides: The stride of the sliding window for each dimension of the input
// tensor. The length must be 4 to match the number of dimensions of the input.
//	padding: The type of padding algorithm to use.
//
// Returns:
//	min_output: The float value that the lowest quantized output value represents.
//	max_output: The float value that the highest quantized output value represents.
func QuantizedMaxPool(scope *Scope, input tf.Output, min_input tf.Output, max_input tf.Output, ksize []int64, strides []int64, padding string) (output tf.Output, min_output tf.Output, max_output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
	opspec := tf.OpSpec{
		Type: "QuantizedMaxPool",
		Input: []tf.Input{
			input, min_input, max_input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}

// RandomShuffleAttr is an optional argument to RandomShuffle.
type RandomShuffleAttr func(optionalAttr)

// RandomShuffleSeed sets the optional seed attribute to value.
//
// value: If either `seed` or `seed2` are set to be non-zero, the random number
// generator is seeded by the given seed.  Otherwise, it is seeded by a
// random seed.
// If not specified, defaults to 0
func RandomShuffleSeed(value int64) RandomShuffleAttr {
	return func(m optionalAttr) {
		m["seed"] = value
	}
}

// RandomShuffleSeed2 sets the optional seed2 attribute to value.
//
// value: A second seed to avoid seed collision.
// If not specified, defaults to 0
func RandomShuffleSeed2(value int64) RandomShuffleAttr {
	return func(m optionalAttr) {
		m["seed2"] = value
	}
}

// Randomly shuffles a tensor along its first dimension.
//
//   The tensor is shuffled along dimension 0, such that each `value[j]` is mapped
//   to one and only one `output[i]`. For example, a mapping that might occur for a
//   3x2 tensor is:
//
// ```
// [[1, 2],       [[5, 6],
//  [3, 4],  ==>   [1, 2],
//  [5, 6]]        [3, 4]]
// ```
//
// Arguments:
//	value: The tensor to be shuffled.
//
// Returns A tensor of same shape and type as `value`, shuffled along its first
// dimension.
func RandomShuffle(scope *Scope, value tf.Output, optional ...RandomShuffleAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "RandomShuffle",
		Input: []tf.Input{
			value,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
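
// exampleRandomShuffle is an illustrative, hand-written sketch (not machine
// generated): it shuffles a 3x2 constant along dimension 0. The explicit
// seeds (made up here) make the shuffle deterministic across runs.
func exampleRandomShuffle(scope *Scope) tf.Output {
	value := Const(scope, [][]int32{{1, 2}, {3, 4}, {5, 6}})
	return RandomShuffle(scope, value, RandomShuffleSeed(42), RandomShuffleSeed2(7))
}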

// EnqueueTPUEmbeddingSparseBatchAttr is an optional argument to EnqueueTPUEmbeddingSparseBatch.
type EnqueueTPUEmbeddingSparseBatchAttr func(optionalAttr)

// EnqueueTPUEmbeddingSparseBatchDeviceOrdinal sets the optional device_ordinal attribute to value.
//
// value: The TPU device to use. Should be >= 0 and less than the number
// of TPU cores in the task on which the node is placed.
// If not specified, defaults to -1
func EnqueueTPUEmbeddingSparseBatchDeviceOrdinal(value int64) EnqueueTPUEmbeddingSparseBatchAttr {
	return func(m optionalAttr) {
		m["device_ordinal"] = value
	}
}

// EnqueueTPUEmbeddingSparseBatchCombiners sets the optional combiners attribute to value.
//
// value: A list of string scalars, one for each embedding table, specifying
// how to normalize the embedding activations after weighted summation.
// Supported combiners are 'mean', 'sum', or 'sqrtn'. It is invalid to have
// the sum of the weights be 0 for 'mean' or the sum of the squared weights be
// 0 for 'sqrtn'. If combiners isn't passed, the default is to use 'sum' for
// all tables.
// If not specified, defaults to <>
func EnqueueTPUEmbeddingSparseBatchCombiners(value []string) EnqueueTPUEmbeddingSparseBatchAttr {
	return func(m optionalAttr) {
		m["combiners"] = value
	}
}

// An op that enqueues TPUEmbedding input indices from a SparseTensor.
//
// This Op eases the porting of code that uses embedding_lookup_sparse(),
// although some Python preprocessing of the SparseTensor arguments to
// embedding_lookup_sparse() is required to produce the arguments to this Op,
// since only a single EnqueueTPUEmbeddingSparseBatch Op is allowed per training
// step.
//
// The tensors at corresponding positions in the three input lists
// must have the same shape, i.e. rank 1 with dim_size() equal to the total
// number of lookups into the table described by the corresponding table_id.
//
// Arguments:
//	sample_indices: A list of rank 1 Tensors specifying the training example and
// feature to which the corresponding embedding_indices and aggregation_weights
// values belong. sample_indices[i] must equal b * nf + f, where nf is the
// number of features from the corresponding table, f is in [0, nf), and
// b is in [0, batch size).
//	embedding_indices: A list of rank 1 Tensors, indices into the embedding tables.
//	aggregation_weights: A list of rank 1 Tensors containing per sample -- i.e. per
// (training example, feature) -- aggregation weights.
//	mode_override: A string input that overrides the mode specified in the
// TPUEmbeddingConfiguration. Supported values are {'unspecified', 'inference',
// 'training', 'backward_pass_only'}. When set to 'unspecified', the mode set
// in TPUEmbeddingConfiguration is used, otherwise mode_override is used.
//
// Returns the created operation.
func EnqueueTPUEmbeddingSparseBatch(scope *Scope, sample_indices []tf.Output, embedding_indices []tf.Output, aggregation_weights []tf.Output, mode_override tf.Output, optional ...EnqueueTPUEmbeddingSparseBatchAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "EnqueueTPUEmbeddingSparseBatch",
		Input: []tf.Input{
			tf.OutputList(sample_indices), tf.OutputList(embedding_indices), tf.OutputList(aggregation_weights), mode_override,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}

// StatelessRandomNormalAttr is an optional argument to StatelessRandomNormal.
type StatelessRandomNormalAttr func(optionalAttr)

// StatelessRandomNormalDtype sets the optional dtype attribute to value.
//
// value: The type of the output.
// If not specified, defaults to DT_FLOAT
func StatelessRandomNormalDtype(value tf.DataType) StatelessRandomNormalAttr {
	return func(m optionalAttr) {
		m["dtype"] = value
	}
}

// Outputs deterministic pseudorandom values from a normal distribution.
//
// The generated values will have mean 0 and standard deviation 1.
//
// The outputs are a deterministic function of `shape` and `seed`.
//
// Arguments:
//	shape: The shape of the output tensor.
//	seed: 2 seeds (shape [2]).
//
// Returns Random values with specified shape.
func StatelessRandomNormal(scope *Scope, shape tf.Output, seed tf.Output, optional ...StatelessRandomNormalAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "StatelessRandomNormal",
		Input: []tf.Input{
			shape, seed,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
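
// exampleStatelessRandomNormal is an illustrative, hand-written sketch (not
// machine generated): it draws a deterministic 2x3 float tensor. The seed
// values are arbitrary; re-running with the same shape and seed yields the
// same values.
func exampleStatelessRandomNormal(scope *Scope) tf.Output {
	shape := Const(scope, []int32{2, 3})
	seed := Const(scope, []int64{1, 2}) // 2 seeds (shape [2])
	return StatelessRandomNormal(scope, shape, seed)
}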

// An Op to exchange data across TPU replicas.
//
// On each replica, the input is split into `split_count` blocks along
// `split_dimension` and sent to the other replicas given group_assignment. After
// receiving `split_count` - 1 blocks from other replicas, we concatenate the
// blocks along `concat_dimension` as the output.
//
// For example, suppose there are 2 TPU replicas:
// replica 0 receives input: `[[A, B]]`
// replica 1 receives input: `[[C, D]]`
//
// group_assignment=`[[0, 1]]`
// concat_dimension=0
// split_dimension=1
// split_count=2
//
// replica 0's output: `[[A], [C]]`
// replica 1's output: `[[B], [D]]`
//
// Arguments:
//	input: The local input to the sum.
//	group_assignment: An int32 tensor with shape
// [num_groups, num_replicas_per_group]. `group_assignment[i]` represents the
// replica ids in the ith subgroup.
//	concat_dimension: The dimension number to concatenate.
//	split_dimension: The dimension number to split.
//	split_count: The number of splits; this number must equal the sub-group
// size (group_assignment.get_shape()[1]).
//
// Returns The exchanged result.
func AllToAll(scope *Scope, input tf.Output, group_assignment tf.Output, concat_dimension int64, split_dimension int64, split_count int64) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"concat_dimension": concat_dimension, "split_dimension": split_dimension, "split_count": split_count}
	opspec := tf.OpSpec{
		Type: "AllToAll",
		Input: []tf.Input{
			input, group_assignment,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Adds a value to the current value of a variable.
//
// Any ReadVariableOp with a control dependency on this op is guaranteed to
// see the incremented value or a subsequent newer one.
//
// Arguments:
//	resource: handle to the resource in which to store the variable.
//	value: the value by which the variable will be incremented.
//
// Returns the created operation.
func AssignAddVariableOp(scope *Scope, resource tf.Output, value tf.Output) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "AssignAddVariableOp",
		Input: []tf.Input{
			resource, value,
		},
	}
	return scope.AddOperation(opspec)
}
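
// exampleAssignAddVariableOp is an illustrative, hand-written sketch (not
// machine generated): it creates a scalar float resource-variable handle and
// increments it by 1. Pairing VarHandleOp with AssignAddVariableOp this way
// is an assumption of the sketch; real code would also initialize the
// variable first (e.g. with AssignVariableOp).
func exampleAssignAddVariableOp(scope *Scope) *tf.Operation {
	handle := VarHandleOp(scope, tf.Float, tf.ScalarShape())
	return AssignAddVariableOp(scope, handle, Const(scope, float32(1)))
}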

// Real-valued fast Fourier transform.
//
// Computes the 1-dimensional discrete Fourier transform of a real-valued signal
// over the inner-most dimension of `input`.
//
// Since the DFT of a real signal is Hermitian-symmetric, `RFFT` only returns the
// `fft_length / 2 + 1` unique components of the FFT: the zero-frequency term,
// followed by the `fft_length / 2` positive-frequency terms.
//
// Along the axis `RFFT` is computed on, if `fft_length` is smaller than the
// corresponding dimension of `input`, the dimension is cropped. If it is larger,
// the dimension is padded with zeros.
//
// Arguments:
//	input: A float32 tensor.
//	fft_length: An int32 tensor of shape [1]. The FFT length.
//
// Returns A complex64 tensor of the same rank as `input`. The inner-most
//   dimension of `input` is replaced with the `fft_length / 2 + 1` unique
//   frequency components of its 1D Fourier transform.
//
// @compatibility(numpy)
// Equivalent to np.fft.rfft
// @end_compatibility
func RFFT(scope *Scope, input tf.Output, fft_length tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "RFFT",
		Input: []tf.Input{
			input, fft_length,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
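
// exampleRFFT is an illustrative, hand-written sketch (not machine
// generated): it computes the FFT of a length-8 real signal, yielding
// fft_length/2 + 1 = 5 complex64 frequency components. The input values are
// made up.
func exampleRFFT(scope *Scope) tf.Output {
	signal := Const(scope, []float32{0, 1, 0, -1, 0, 1, 0, -1})
	fftLength := Const(scope, []int32{8})
	return RFFT(scope, signal, fftLength)
}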

// RetrieveTPUEmbeddingAdadeltaParametersAttr is an optional argument to RetrieveTPUEmbeddingAdadeltaParameters.
type RetrieveTPUEmbeddingAdadeltaParametersAttr func(optionalAttr)

// RetrieveTPUEmbeddingAdadeltaParametersTableId sets the optional table_id attribute to value.
// If not specified, defaults to -1
//
// REQUIRES: value >= -1
func RetrieveTPUEmbeddingAdadeltaParametersTableId(value int64) RetrieveTPUEmbeddingAdadeltaParametersAttr {
	return func(m optionalAttr) {
		m["table_id"] = value
	}
}

// RetrieveTPUEmbeddingAdadeltaParametersTableName sets the optional table_name attribute to value.
// If not specified, defaults to ""
func RetrieveTPUEmbeddingAdadeltaParametersTableName(value string) RetrieveTPUEmbeddingAdadeltaParametersAttr {
	return func(m optionalAttr) {
		m["table_name"] = value
	}
}

// Retrieve Adadelta embedding parameters.
//
// An op that retrieves optimization parameters from embedding to host
// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
// the correct embedding table configuration. For example, this op is
// used to retrieve updated parameters before saving a checkpoint.
//
// Returns:
//	parameters: Parameter parameters updated by the Adadelta optimization algorithm.
//	accumulators: Parameter accumulators updated by the Adadelta optimization algorithm.
//	updates: Parameter updates updated by the Adadelta optimization algorithm.
func RetrieveTPUEmbeddingAdadeltaParameters(scope *Scope, num_shards int64, shard_id int64, optional ...RetrieveTPUEmbeddingAdadeltaParametersAttr) (parameters tf.Output, accumulators tf.Output, updates tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "RetrieveTPUEmbeddingAdadeltaParameters",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}

// UpperBoundAttr is an optional argument to UpperBound.
type UpperBoundAttr func(optionalAttr)

// UpperBoundOutType sets the optional out_type attribute to value.
// If not specified, defaults to DT_INT32
func UpperBoundOutType(value tf.DataType) UpperBoundAttr {
	return func(m optionalAttr) {
		m["out_type"] = value
	}
}

// Applies upper_bound(sorted_search_values, values) along each row.
//
// Each set of rows with the same index in (sorted_inputs, values) is treated
// independently.  The resulting row is the equivalent of calling
// `np.searchsorted(sorted_inputs, values, side='right')`.
//
// The result is not a global index to the entire
// `Tensor`, but rather just the index in the last dimension.
//
// A 2-D example:
//   sorted_sequence = [[0, 3, 9, 9, 10],
//                      [1, 2, 3, 4, 5]]
//   values = [[2, 4, 9],
//             [0, 2, 6]]
//
//   result = UpperBound(sorted_sequence, values)
//
//   result == [[1, 2, 4],
//              [0, 2, 5]]
//
// Arguments:
//	sorted_inputs: 2-D Tensor where each row is ordered.
//	values: 2-D Tensor with the same numbers of rows as `sorted_search_values`. Contains
// the values that will be searched for in `sorted_search_values`.
//
// Returns A `Tensor` with the same shape as `values`.  It contains the last scalar index
// into the last dimension where values can be inserted without changing the
// ordered property.
func UpperBound(scope *Scope, sorted_inputs tf.Output, values tf.Output, optional ...UpperBoundAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "UpperBound",
		Input: []tf.Input{
			sorted_inputs, values,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
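
// exampleUpperBound is an illustrative, hand-written sketch (not machine
// generated) reproducing the 2-D example above: for each row it returns the
// rightmost insertion points, here [[1, 2, 4], [0, 2, 5]].
func exampleUpperBound(scope *Scope) tf.Output {
	sorted := Const(scope, [][]int32{{0, 3, 9, 9, 10}, {1, 2, 3, 4, 5}})
	values := Const(scope, [][]int32{{2, 4, 9}, {0, 2, 6}})
	return UpperBound(scope, sorted, values)
}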

// FractionalMaxPoolGradAttr is an optional argument to FractionalMaxPoolGrad.
type FractionalMaxPoolGradAttr func(optionalAttr)

// FractionalMaxPoolGradOverlapping sets the optional overlapping attribute to value.
//
// value: When set to True, it means when pooling, the values at the boundary
// of adjacent pooling cells are used by both cells. For example:
//
// `index  0  1  2  3  4`
//
// `value  20 5  16 3  7`
//
// If the pooling sequence is [0, 2, 4], then 16, at index 2, will be used twice.
// The result would be [20, 16] for fractional max pooling.
// If not specified, defaults to false
func FractionalMaxPoolGradOverlapping(value bool) FractionalMaxPoolGradAttr {
	return func(m optionalAttr) {
		m["overlapping"] = value
	}
}

// Computes gradient of the FractionalMaxPool function.
//
// Arguments:
//	orig_input: Original input for `fractional_max_pool`
//	orig_output: Original output for `fractional_max_pool`
//	out_backprop: 4-D with shape `[batch, height, width, channels]`.  Gradients
// w.r.t. the output of `fractional_max_pool`.
//	row_pooling_sequence: row pooling sequence, forms pooling regions together with
// col_pooling_sequence.
//	col_pooling_sequence: column pooling sequence, forms pooling regions together with
// row_pooling_sequence.
//
// Returns 4-D.  Gradients w.r.t. the input of `fractional_max_pool`.
func FractionalMaxPoolGrad(scope *Scope, orig_input tf.Output, orig_output tf.Output, out_backprop tf.Output, row_pooling_sequence tf.Output, col_pooling_sequence tf.Output, optional ...FractionalMaxPoolGradAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "FractionalMaxPoolGrad",
		Input: []tf.Input{
			orig_input, orig_output, out_backprop, row_pooling_sequence, col_pooling_sequence,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// SparseReduceMaxSparseAttr is an optional argument to SparseReduceMaxSparse.
type SparseReduceMaxSparseAttr func(optionalAttr)

// SparseReduceMaxSparseKeepDims sets the optional keep_dims attribute to value.
//
// value: If true, retain reduced dimensions with length 1.
// If not specified, defaults to false
func SparseReduceMaxSparseKeepDims(value bool) SparseReduceMaxSparseAttr {
	return func(m optionalAttr) {
		m["keep_dims"] = value
	}
}

// Computes the max of elements across dimensions of a SparseTensor.
//
// This Op takes a SparseTensor and is the sparse counterpart to
// `tf.reduce_max()`.  In contrast to SparseReduceMax, this Op returns a
// SparseTensor.
//
// Reduces `sp_input` along the dimensions given in `reduction_axes`.  Unless
// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
// `reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained
// with length 1.
//
// If `reduction_axes` has no entries, all dimensions are reduced, and a tensor
// with a single element is returned.  Additionally, the axes can be negative,
// which are interpreted according to the indexing rules in Python.
//
// Arguments:
//	input_indices: 2-D.  `N x R` matrix with the indices of non-empty values in a
// SparseTensor, possibly not in canonical ordering.
//	input_values: 1-D.  `N` non-empty values corresponding to `input_indices`.
//	input_shape: 1-D.  Shape of the input SparseTensor.
//	reduction_axes: 1-D.  Length-`K` vector containing the reduction axes.
func SparseReduceMaxSparse(scope *Scope, input_indices tf.Output, input_values tf.Output, input_shape tf.Output, reduction_axes tf.Output, optional ...SparseReduceMaxSparseAttr) (output_indices tf.Output, output_values tf.Output, output_shape tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "SparseReduceMaxSparse",
		Input: []tf.Input{
			input_indices, input_values, input_shape, reduction_axes,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}

// Convert one or more images from HSV to RGB.
//
// Outputs a tensor of the same shape as the `images` tensor, containing the RGB
// value of the pixels. The output is only well defined if the values in `images`
// are in `[0,1]`.
//
// See `rgb_to_hsv` for a description of the HSV encoding.
//
// Arguments:
//	images: 1-D or higher rank. HSV data to convert. Last dimension must be size 3.
//
// Returns `images` converted to RGB.
func HSVToRGB(scope *Scope, images tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "HSVToRGB",
		Input: []tf.Input{
			images,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
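
// exampleHSVToRGB is an illustrative, hand-written sketch (not machine
// generated): it converts a single made-up HSV pixel (last dimension size 3)
// to RGB.
func exampleHSVToRGB(scope *Scope) tf.Output {
	hsv := Const(scope, []float32{0.5, 1.0, 1.0}) // [hue, saturation, value]
	return HSVToRGB(scope, hsv)
}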

// Computes the gradient of the sigmoid of `x` wrt its input.
//
// Specifically, `grad = dy * y * (1 - y)`, where `y = sigmoid(x)`, and
// `dy` is the corresponding input gradient.
func SigmoidGrad(scope *Scope, y tf.Output, dy tf.Output) (z tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "SigmoidGrad",
		Input: []tf.Input{
			y, dy,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Creates a dataset that changes the batch size.
//
// Creates a dataset that changes the batch size of the dataset to the current
// batch size divided by `num_workers`.
//
// Arguments:
//	input_dataset: A variant tensor representing the input dataset.
//	num_workers: A scalar representing the number of workers to distribute this batch across. As
// a result of this transformation the current batch size would end up being
// divided by this parameter.
//
//
func ExperimentalRebatchDataset(scope *Scope, input_dataset tf.Output, num_workers tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
	opspec := tf.OpSpec{
		Type: "ExperimentalRebatchDataset",
		Input: []tf.Input{
			input_dataset, num_workers,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Creates a dataset that emits the outputs of `input_dataset` `count` times.
//
// Arguments:
//
//	count: A scalar representing the number of times that `input_dataset` should
// be repeated. A value of `-1` indicates that it should be repeated infinitely.
//
//
func RepeatDataset(scope *Scope, input_dataset tf.Output, count tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
	opspec := tf.OpSpec{
		Type: "RepeatDataset",
		Input: []tf.Input{
			input_dataset, count,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
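
// exampleRepeatDataset is an illustrative, hand-written sketch (not machine
// generated): it builds a small dataset from a constant vector with
// TensorSliceDataset and repeats it indefinitely (count = -1). The element
// types and scalar shapes below are assumptions tied to this particular
// input.
func exampleRepeatDataset(scope *Scope) tf.Output {
	components := []tf.Output{Const(scope, []int64{1, 2, 3})}
	shapes := []tf.Shape{tf.ScalarShape()}
	ds := TensorSliceDataset(scope, components, shapes)
	count := Const(scope, int64(-1))
	return RepeatDataset(scope, ds, count, []tf.DataType{tf.Int64}, shapes)
}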

// ResourceApplyAdagradDAAttr is an optional argument to ResourceApplyAdagradDA.
type ResourceApplyAdagradDAAttr func(optionalAttr)

// ResourceApplyAdagradDAUseLocking sets the optional use_locking attribute to value.
//
// value: If True, updating of the var and accum tensors will be protected by
// a lock; otherwise the behavior is undefined, but may exhibit less contention.
// If not specified, defaults to false
func ResourceApplyAdagradDAUseLocking(value bool) ResourceApplyAdagradDAAttr {
	return func(m optionalAttr) {
		m["use_locking"] = value
	}
}

// Update '*var' according to the proximal adagrad scheme.
//
// Arguments:
//	var_: Should be from a Variable().
//	gradient_accumulator: Should be from a Variable().
//	gradient_squared_accumulator: Should be from a Variable().
//	grad: The gradient.
//	lr: Scaling factor. Must be a scalar.
//	l1: L1 regularization. Must be a scalar.
//	l2: L2 regularization. Must be a scalar.
//	global_step: Training step number. Must be a scalar.
//
// Returns the created operation.
func ResourceApplyAdagradDA(scope *Scope, var_ tf.Output, gradient_accumulator tf.Output, gradient_squared_accumulator tf.Output, grad tf.Output, lr tf.Output, l1 tf.Output, l2 tf.Output, global_step tf.Output, optional ...ResourceApplyAdagradDAAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ResourceApplyAdagradDA",
		Input: []tf.Input{
			var_, gradient_accumulator, gradient_squared_accumulator, grad, lr, l1, l2, global_step,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}

// Creates a TensorList which, when stacked, has the value of `tensor`.
//
// Each tensor in the result list corresponds to one row of the input tensor.
//
// tensor: The input tensor.
// output_handle: The list.
func TensorListFromTensor(scope *Scope, tensor tf.Output, element_shape tf.Output) (output_handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "TensorListFromTensor",
		Input: []tf.Input{
			tensor, element_shape,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
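
// exampleTensorListFromTensor is an illustrative, hand-written sketch (not
// machine generated): it turns a 2x3 constant into a TensorList with two
// elements, each of shape [3]. The element_shape constant mirrors that
// per-row shape.
func exampleTensorListFromTensor(scope *Scope) tf.Output {
	t := Const(scope, [][]float32{{1, 2, 3}, {4, 5, 6}})
	elementShape := Const(scope, []int32{3})
	return TensorListFromTensor(scope, t, elementShape)
}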

// ConfigureDistributedTPUAttr is an optional argument to ConfigureDistributedTPU.
type ConfigureDistributedTPUAttr func(optionalAttr)

// ConfigureDistributedTPUEmbeddingConfig sets the optional embedding_config attribute to value.
//
// value: Reserved. Do not use.
// If not specified, defaults to ""
func ConfigureDistributedTPUEmbeddingConfig(value string) ConfigureDistributedTPUAttr {
	return func(m optionalAttr) {
		m["embedding_config"] = value
	}
}

// ConfigureDistributedTPUTpuEmbeddingConfig sets the optional tpu_embedding_config attribute to value.
//
// value: Serialized tensorflow.tpu.TPUEmbeddingConfiguration that
// describes the embedding lookups of the program.
// If not specified, defaults to ""
func ConfigureDistributedTPUTpuEmbeddingConfig(value string) ConfigureDistributedTPUAttr {
	return func(m optionalAttr) {
		m["tpu_embedding_config"] = value
	}
}

// ConfigureDistributedTPUIsGlobalInit sets the optional is_global_init attribute to value.
//
// value: Reserved. Do not use.
// If not specified, defaults to false
func ConfigureDistributedTPUIsGlobalInit(value bool) ConfigureDistributedTPUAttr {
	return func(m optionalAttr) {
		m["is_global_init"] = value
	}
}

// Sets up the centralized structures for a distributed TPU system.
//
// Returns A serialized tensorflow.tpu.TopologyProto that describes the TPU
// topology.
func ConfigureDistributedTPU(scope *Scope, optional ...ConfigureDistributedTPUAttr) (topology tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ConfigureDistributedTPU",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Reshapes a quantized tensor as per the Reshape op.
//
// Arguments:
//
//	shape: Defines the shape of the output tensor.
//	input_min: The minimum value of the input.
//	input_max: The maximum value of the input.
//
// Returns:
//	output_min: This value is copied from input_min.
//	output_max: This value is copied from input_max.
func QuantizedReshape(scope *Scope, tensor tf.Output, shape tf.Output, input_min tf.Output, input_max tf.Output) (output tf.Output, output_min tf.Output, output_max tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "QuantizedReshape",
		Input: []tf.Input{
			tensor, shape, input_min, input_max,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}

// PriorityQueueV2Attr is an optional argument to PriorityQueueV2.
type PriorityQueueV2Attr func(optionalAttr)

// PriorityQueueV2ComponentTypes sets the optional component_types attribute to value.
//
// value: The type of each component in a value.
// If not specified, defaults to <>
//
// REQUIRES: len(value) >= 0
func PriorityQueueV2ComponentTypes(value []tf.DataType) PriorityQueueV2Attr {
	return func(m optionalAttr) {
		m["component_types"] = value
	}
}

// PriorityQueueV2Capacity sets the optional capacity attribute to value.
//
// value: The upper bound on the number of elements in this queue.
// Negative numbers mean no limit.
// If not specified, defaults to -1
func PriorityQueueV2Capacity(value int64) PriorityQueueV2Attr {
	return func(m optionalAttr) {
		m["capacity"] = value
	}
}

// PriorityQueueV2Container sets the optional container attribute to value.
//
// value: If non-empty, this queue is placed in the given container.
// Otherwise, a default container is used.
// If not specified, defaults to ""
func PriorityQueueV2Container(value string) PriorityQueueV2Attr {
	return func(m optionalAttr) {
		m["container"] = value
	}
}

// PriorityQueueV2SharedName sets the optional shared_name attribute to value.
//
// value: If non-empty, this queue will be shared under the given name
// across multiple sessions.
// If not specified, defaults to ""
func PriorityQueueV2SharedName(value string) PriorityQueueV2Attr {
	return func(m optionalAttr) {
		m["shared_name"] = value
	}
}

// A queue that produces elements sorted by the first component value.
//
// Note that the PriorityQueue requires the first component of any element
// to be a scalar int64, in addition to the other elements declared by
// component_types.  Therefore calls to Enqueue and EnqueueMany (resp. Dequeue
// and DequeueMany) on a PriorityQueue will all require (resp. output) one extra
// entry in their input (resp. output) lists.
//
// Arguments:
//	shapes: The shape of each component in a value. The length of this attr must
// be either 0 or the same as the length of component_types. If the length of
// this attr is 0, the shapes of queue elements are not constrained, and
// only one element may be dequeued at a time.
//
// Returns The handle to the queue.
func PriorityQueueV2(scope *Scope, shapes []tf.Shape, optional ...PriorityQueueV2Attr) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"shapes": shapes}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "PriorityQueueV2",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// ResourceSparseApplyProximalGradientDescentAttr is an optional argument to ResourceSparseApplyProximalGradientDescent.
type ResourceSparseApplyProximalGradientDescentAttr func(optionalAttr)

// ResourceSparseApplyProximalGradientDescentUseLocking sets the optional use_locking attribute to value.
//
// value: If True, the subtraction will be protected by a lock;
// otherwise the behavior is undefined, but may exhibit less contention.
// If not specified, defaults to false
func ResourceSparseApplyProximalGradientDescentUseLocking(value bool) ResourceSparseApplyProximalGradientDescentAttr {
	return func(m optionalAttr) {
		m["use_locking"] = value
	}
}

// Sparse update of '*var' using the FOBOS algorithm with a fixed learning rate.
//
// That is, for the rows for which we have grad, we update var as follows:
// prox_v = var - alpha * grad
// var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0}
//
// Arguments:
//	var_: Should be from a Variable().
//	alpha: Scaling factor. Must be a scalar.
//	l1: L1 regularization. Must be a scalar.
//	l2: L2 regularization. Must be a scalar.
//	grad: The gradient.
//	indices: A vector of indices into the first dimension of var and accum.
//
// Returns the created operation.
func ResourceSparseApplyProximalGradientDescent(scope *Scope, var_ tf.Output, alpha tf.Output, l1 tf.Output, l2 tf.Output, grad tf.Output, indices tf.Output, optional ...ResourceSparseApplyProximalGradientDescentAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ResourceSparseApplyProximalGradientDescent",
		Input: []tf.Input{
			var_, alpha, l1, l2, grad, indices,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}

// Check if the input matches the regex pattern.
//
// The input is a string tensor of any shape. The pattern is the
// regular expression to be matched with every element of the input tensor.
// The boolean values (True or False) of the output tensor indicate
// if the input matches the regex pattern provided.
//
// The pattern follows the re2 syntax (https://github.com/google/re2/wiki/Syntax).
//
// Arguments:
//	input: A string tensor of the text to be processed.
//	pattern: The regular expression to match the input.
//
// Returns A bool tensor with the same shape as `input`.
func StaticRegexFullMatch(scope *Scope, input tf.Output, pattern string) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"pattern": pattern}
	opspec := tf.OpSpec{
		Type: "StaticRegexFullMatch",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
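
// exampleStaticRegexFullMatch is an illustrative, hand-written sketch (not
// machine generated): it checks which elements of a made-up string tensor
// consist entirely of ASCII digits, yielding [true, false, true].
func exampleStaticRegexFullMatch(scope *Scope) tf.Output {
	input := Const(scope, []string{"123", "abc", "42"})
	return StaticRegexFullMatch(scope, input, `[0-9]+`)
}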

// OutfeedDequeueAttr is an optional argument to OutfeedDequeue.
type OutfeedDequeueAttr func(optionalAttr)

// OutfeedDequeueDeviceOrdinal sets the optional device_ordinal attribute to value.
//
// value: The TPU device to use. This should be -1 when the Op
// is running on a TPU device, and >= 0 when the Op is running on the CPU
// device.
// If not specified, defaults to -1
func OutfeedDequeueDeviceOrdinal(value int64) OutfeedDequeueAttr {
	return func(m optionalAttr) {
		m["device_ordinal"] = value
	}
}

// Retrieves a single tensor from the computation outfeed.
//
// This operation will block indefinitely until data is available.
//
// Arguments:
//	dtype: The type of elements in the tensor.
//	shape: The shape of the tensor.
//
// Returns A tensor that will be read from the device outfeed.
func OutfeedDequeue(scope *Scope, dtype tf.DataType, shape tf.Shape, optional ...OutfeedDequeueAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"dtype": dtype, "shape": shape}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "OutfeedDequeue",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// RandomPoissonV2Attr is an optional argument to RandomPoissonV2.
type RandomPoissonV2Attr func(optionalAttr)

// RandomPoissonV2Seed sets the optional seed attribute to value.
//
// value: If either `seed` or `seed2` are set to be non-zero, the random number
// generator is seeded by the given seed.  Otherwise, it is seeded by a
// random seed.
// If not specified, defaults to 0
func RandomPoissonV2Seed(value int64) RandomPoissonV2Attr {
	return func(m optionalAttr) {
		m["seed"] = value
	}
}

// RandomPoissonV2Seed2 sets the optional seed2 attribute to value.
//
// value: A second seed to avoid seed collision.
// If not specified, defaults to 0
func RandomPoissonV2Seed2(value int64) RandomPoissonV2Attr {
	return func(m optionalAttr) {
		m["seed2"] = value
	}
}

// RandomPoissonV2Dtype sets the optional dtype attribute to value.
// If not specified, defaults to DT_INT64
func RandomPoissonV2Dtype(value tf.DataType) RandomPoissonV2Attr {
	return func(m optionalAttr) {
		m["dtype"] = value
	}
}

// Outputs random values from the Poisson distribution(s) described by rate.
//
// This op uses two algorithms, depending on rate. If rate >= 10, then
// the algorithm by Hormann is used to acquire samples via
// transformation-rejection.
// See http://www.sciencedirect.com/science/article/pii/0167668793909974.
//
// Otherwise, Knuth's algorithm is used to acquire samples via multiplying uniform
// random variables.
// See Donald E. Knuth (1969). Seminumerical Algorithms. The Art of Computer
// Programming, Volume 2. Addison Wesley.
//
// Arguments:
//	shape: 1-D integer tensor. Shape of independent samples to draw from each
// distribution described by the shape parameters given in rate.
//	rate: A tensor in which each scalar is a "rate" parameter describing the
// associated poisson distribution.
//
// Returns A tensor with shape `shape + shape(rate)`. Each slice
// `[:, ..., :, i0, i1, ...iN]` contains the samples drawn for
// `rate[i0, i1, ...iN]`.
func RandomPoissonV2(scope *Scope, shape tf.Output, rate tf.Output, optional ...RandomPoissonV2Attr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "RandomPoissonV2",
		Input: []tf.Input{
			shape, rate,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
26452
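// A hedged sketch (same assumed client setup as the StaticRegexFullMatch
// example above): drawing 5 samples from each of two Poisson distributions,
// with a fixed seed so the run is reproducible.
//
//	s := op.NewScope()
//	shape := op.Const(s, []int32{5})
//	rate := op.Const(s, []float32{1.0, 10.0})
//	samples := op.RandomPoissonV2(s, shape, rate, op.RandomPoissonV2Seed(42))
//	// `samples` has shape [5, 2]: five int64 draws per rate, following the
//	// `shape + shape(rate)` contract documented above.
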
// RetrieveTPUEmbeddingRMSPropParametersGradAccumDebugAttr is an optional argument to RetrieveTPUEmbeddingRMSPropParametersGradAccumDebug.
type RetrieveTPUEmbeddingRMSPropParametersGradAccumDebugAttr func(optionalAttr)

// RetrieveTPUEmbeddingRMSPropParametersGradAccumDebugTableId sets the optional table_id attribute to value.
// If not specified, defaults to -1
//
// REQUIRES: value >= -1
func RetrieveTPUEmbeddingRMSPropParametersGradAccumDebugTableId(value int64) RetrieveTPUEmbeddingRMSPropParametersGradAccumDebugAttr {
	return func(m optionalAttr) {
		m["table_id"] = value
	}
}

// RetrieveTPUEmbeddingRMSPropParametersGradAccumDebugTableName sets the optional table_name attribute to value.
// If not specified, defaults to ""
func RetrieveTPUEmbeddingRMSPropParametersGradAccumDebugTableName(value string) RetrieveTPUEmbeddingRMSPropParametersGradAccumDebugAttr {
	return func(m optionalAttr) {
		m["table_name"] = value
	}
}

// Retrieve RMSProp embedding parameters with debug support.
//
// An op that retrieves optimization parameters from embedding to host
// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
// the correct embedding table configuration. For example, this op is
// used to retrieve updated parameters before saving a checkpoint.
//
// Returns Parameter parameters updated by the RMSProp optimization algorithm.
// Parameter ms updated by the RMSProp optimization algorithm.
// Parameter mom updated by the RMSProp optimization algorithm.
// Parameter gradient_accumulators updated by the RMSProp optimization algorithm.
func RetrieveTPUEmbeddingRMSPropParametersGradAccumDebug(scope *Scope, num_shards int64, shard_id int64, optional ...RetrieveTPUEmbeddingRMSPropParametersGradAccumDebugAttr) (parameters tf.Output, ms tf.Output, mom tf.Output, gradient_accumulators tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "RetrieveTPUEmbeddingRMSPropParametersGradAccumDebug",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2), op.Output(3)
}

// Computes the gradient for the rsqrt of `x` wrt its input.
//
// Specifically, `grad = dy * -0.5 * y^3`, where `y = rsqrt(x)`, and `dy`
// is the corresponding input gradient.
func RsqrtGrad(scope *Scope, y tf.Output, dy tf.Output) (z tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "RsqrtGrad",
		Input: []tf.Input{
			y, dy,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Encode audio data using the WAV file format.
//
// This operation will generate a string suitable to be saved out to create a .wav
// audio file. It will be encoded in the 16-bit PCM format. It takes in float
// values in the range -1.0f to 1.0f, and any values outside that range will be
// clamped to it.
//
// `audio` is a 2-D float Tensor of shape `[length, channels]`.
// `sample_rate` is a scalar Tensor holding the rate to use (e.g. 44100).
//
// Arguments:
//	audio: 2-D with shape `[length, channels]`.
//	sample_rate: Scalar containing the sample frequency.
//
// Returns 0-D. WAV-encoded file contents.
func EncodeWav(scope *Scope, audio tf.Output, sample_rate tf.Output) (contents tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "EncodeWav",
		Input: []tf.Input{
			audio, sample_rate,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

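// A hedged sketch (same assumed client setup as above): encoding one second
// of a mono 440 Hz sine wave to WAV bytes. The sine generation is plain Go
// (it assumes "math" is imported) and is not part of this package; the
// fetched scalar string could then be written to disk with os.WriteFile.
//
//	const sampleRate = 44100
//	wave := make([][]float32, sampleRate)
//	for i := range wave {
//		v := float32(math.Sin(2 * math.Pi * 440 * float64(i) / sampleRate))
//		wave[i] = []float32{v} // one channel per row: shape [44100, 1]
//	}
//	s := op.NewScope()
//	audio := op.Const(s, wave)
//	rate := op.Const(s, int32(sampleRate))
//	wav := op.EncodeWav(s, audio, rate)
//	// After running the graph, the fetched scalar holds the complete
//	// 16-bit PCM WAV file contents.
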
// ResourceApplyAdaMaxAttr is an optional argument to ResourceApplyAdaMax.
type ResourceApplyAdaMaxAttr func(optionalAttr)

// ResourceApplyAdaMaxUseLocking sets the optional use_locking attribute to value.
//
// value: If `True`, updating of the var, m, and v tensors will be protected
// by a lock; otherwise the behavior is undefined, but may exhibit less
// contention.
// If not specified, defaults to false
func ResourceApplyAdaMaxUseLocking(value bool) ResourceApplyAdaMaxAttr {
	return func(m optionalAttr) {
		m["use_locking"] = value
	}
}

// Update '*var' according to the AdaMax algorithm.
//
// m_t <- beta1 * m_{t-1} + (1 - beta1) * g
// v_t <- max(beta2 * v_{t-1}, abs(g))
// variable <- variable - learning_rate / (1 - beta1^t) * m_t / (v_t + epsilon)
//
// Arguments:
//	var_: Should be from a Variable().
//	m: Should be from a Variable().
//	v: Should be from a Variable().
//	beta1_power: Must be a scalar.
//	lr: Scaling factor. Must be a scalar.
//	beta1: Momentum factor. Must be a scalar.
//	beta2: Momentum factor. Must be a scalar.
//	epsilon: Ridge term. Must be a scalar.
//	grad: The gradient.
//
// Returns the created operation.
func ResourceApplyAdaMax(scope *Scope, var_ tf.Output, m tf.Output, v tf.Output, beta1_power tf.Output, lr tf.Output, beta1 tf.Output, beta2 tf.Output, epsilon tf.Output, grad tf.Output, optional ...ResourceApplyAdaMaxAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ResourceApplyAdaMax",
		Input: []tf.Input{
			var_, m, v, beta1_power, lr, beta1, beta2, epsilon, grad,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}

// Computes atan of x element-wise.
func Atan(scope *Scope, x tf.Output) (y tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Atan",
		Input: []tf.Input{
			x,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// AssertAttr is an optional argument to Assert.
type AssertAttr func(optionalAttr)

// AssertSummarize sets the optional summarize attribute to value.
//
// value: Print this many entries of each tensor.
// If not specified, defaults to 3
func AssertSummarize(value int64) AssertAttr {
	return func(m optionalAttr) {
		m["summarize"] = value
	}
}

// Asserts that the given condition is true.
//
// If `condition` evaluates to false, print the list of tensors in `data`.
// `summarize` determines how many entries of the tensors to print.
//
// Arguments:
//	condition: The condition to evaluate.
//	data: The tensors to print out when condition is false.
//
// Returns the created operation.
func Assert(scope *Scope, condition tf.Output, data []tf.Output, optional ...AssertAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "Assert",
		Input: []tf.Input{
			condition, tf.OutputList(data),
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}

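// A hedged sketch (same assumed client setup as above): asserting that a
// value is positive and printing the offending tensor when it is not. It
// assumes the generated op.Greater wrapper from this package.
//
//	s := op.NewScope()
//	x := op.Const(s, float32(-3))
//	cond := op.Greater(s, x, op.Const(s, float32(0)))
//	check := op.Assert(s, cond, []tf.Output{x}, op.AssertSummarize(10))
//	// Pass `check` as a target so the check executes:
//	//	_, err := sess.Run(nil, nil, []*tf.Operation{check})
//	// With the values above the condition is false, so Run is expected to
//	// return an error whose message includes the printed tensor entries.
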
// LoadTPUEmbeddingAdagradParametersGradAccumDebugAttr is an optional argument to LoadTPUEmbeddingAdagradParametersGradAccumDebug.
type LoadTPUEmbeddingAdagradParametersGradAccumDebugAttr func(optionalAttr)

// LoadTPUEmbeddingAdagradParametersGradAccumDebugTableId sets the optional table_id attribute to value.
// If not specified, defaults to -1
//
// REQUIRES: value >= -1
func LoadTPUEmbeddingAdagradParametersGradAccumDebugTableId(value int64) LoadTPUEmbeddingAdagradParametersGradAccumDebugAttr {
	return func(m optionalAttr) {
		m["table_id"] = value
	}
}

// LoadTPUEmbeddingAdagradParametersGradAccumDebugTableName sets the optional table_name attribute to value.
// If not specified, defaults to ""
func LoadTPUEmbeddingAdagradParametersGradAccumDebugTableName(value string) LoadTPUEmbeddingAdagradParametersGradAccumDebugAttr {
	return func(m optionalAttr) {
		m["table_name"] = value
	}
}

// Load Adagrad embedding parameters with debug support.
//
// An op that loads optimization parameters into HBM for embedding. Must be
// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
// embedding table configuration. For example, this op is used to install
// parameters that are loaded from a checkpoint before a training loop is
// executed.
//
// Arguments:
//	parameters: Value of parameters used in the Adagrad optimization algorithm.
//	accumulators: Value of accumulators used in the Adagrad optimization algorithm.
//	gradient_accumulators: Value of gradient_accumulators used in the Adagrad optimization algorithm.
//
//
//
// Returns the created operation.
func LoadTPUEmbeddingAdagradParametersGradAccumDebug(scope *Scope, parameters tf.Output, accumulators tf.Output, gradient_accumulators tf.Output, num_shards int64, shard_id int64, optional ...LoadTPUEmbeddingAdagradParametersGradAccumDebugAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "LoadTPUEmbeddingAdagradParametersGradAccumDebug",
		Input: []tf.Input{
			parameters, accumulators, gradient_accumulators,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}

// RetrieveTPUEmbeddingFTRLParametersGradAccumDebugAttr is an optional argument to RetrieveTPUEmbeddingFTRLParametersGradAccumDebug.
type RetrieveTPUEmbeddingFTRLParametersGradAccumDebugAttr func(optionalAttr)

// RetrieveTPUEmbeddingFTRLParametersGradAccumDebugTableId sets the optional table_id attribute to value.
// If not specified, defaults to -1
//
// REQUIRES: value >= -1
func RetrieveTPUEmbeddingFTRLParametersGradAccumDebugTableId(value int64) RetrieveTPUEmbeddingFTRLParametersGradAccumDebugAttr {
	return func(m optionalAttr) {
		m["table_id"] = value
	}
}

// RetrieveTPUEmbeddingFTRLParametersGradAccumDebugTableName sets the optional table_name attribute to value.
// If not specified, defaults to ""
func RetrieveTPUEmbeddingFTRLParametersGradAccumDebugTableName(value string) RetrieveTPUEmbeddingFTRLParametersGradAccumDebugAttr {
	return func(m optionalAttr) {
		m["table_name"] = value
	}
}

// Retrieve FTRL embedding parameters with debug support.
//
// An op that retrieves optimization parameters from embedding to host
// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
// the correct embedding table configuration. For example, this op is
// used to retrieve updated parameters before saving a checkpoint.
//
// Returns Parameter parameters updated by the FTRL optimization algorithm.
// Parameter accumulators updated by the FTRL optimization algorithm.
// Parameter linears updated by the FTRL optimization algorithm.
// Parameter gradient_accumulators updated by the FTRL optimization algorithm.
func RetrieveTPUEmbeddingFTRLParametersGradAccumDebug(scope *Scope, num_shards int64, shard_id int64, optional ...RetrieveTPUEmbeddingFTRLParametersGradAccumDebugAttr) (parameters tf.Output, accumulators tf.Output, linears tf.Output, gradient_accumulators tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "RetrieveTPUEmbeddingFTRLParametersGradAccumDebug",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2), op.Output(3)
}

// A dataset that splits the elements of its input into multiple elements.
func ExperimentalUnbatchDataset(scope *Scope, input_dataset tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
	opspec := tf.OpSpec{
		Type: "ExperimentalUnbatchDataset",
		Input: []tf.Input{
			input_dataset,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// StringFormatAttr is an optional argument to StringFormat.
type StringFormatAttr func(optionalAttr)

// StringFormatTemplate sets the optional template attribute to value.
//
// value: A string, the template to format tensor summaries into.
// If not specified, defaults to "%s"
func StringFormatTemplate(value string) StringFormatAttr {
	return func(m optionalAttr) {
		m["template"] = value
	}
}

// StringFormatPlaceholder sets the optional placeholder attribute to value.
//
// value: A string, at each placeholder in the template a subsequent tensor summary will be inserted.
// If not specified, defaults to "%s"
func StringFormatPlaceholder(value string) StringFormatAttr {
	return func(m optionalAttr) {
		m["placeholder"] = value
	}
}

// StringFormatSummarize sets the optional summarize attribute to value.
//
// value: When formatting the tensor summaries print the first and last summarize entries of each tensor dimension.
// If not specified, defaults to 3
func StringFormatSummarize(value int64) StringFormatAttr {
	return func(m optionalAttr) {
		m["summarize"] = value
	}
}

// Formats a string template using a list of tensors.
//
// Formats a string template using a list of tensors, pretty-printing tensor summaries.
//
// Arguments:
//	inputs: The list of tensors to format into the placeholder string.
//
// Returns The resulting string scalar.
func StringFormat(scope *Scope, inputs []tf.Output, optional ...StringFormatAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "StringFormat",
		Input: []tf.Input{
			tf.OutputList(inputs),
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

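// A hedged sketch (same assumed client setup as above): formatting a tensor
// summary into a message template.
//
//	s := op.NewScope()
//	t := op.Const(s, []int32{1, 2, 3, 4, 5, 6, 7})
//	msg := op.StringFormat(s, []tf.Output{t},
//		op.StringFormatTemplate("tensor: %s"),
//		op.StringFormatSummarize(2))
//	// Fetching `msg` is expected to yield a scalar string along the lines of
//	// "tensor: [1 2 ... 6 7]", with `summarize` controlling how many leading
//	// and trailing entries of each dimension are printed.
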
// Returns true if queue is closed.
//
// This operation returns true if the queue is closed and false if the queue
// is open.
//
// Arguments:
//	handle: The handle to a queue.
func QueueIsClosedV2(scope *Scope, handle tf.Output) (is_closed tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "QueueIsClosedV2",
		Input: []tf.Input{
			handle,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Computes inverse hyperbolic tangent of x element-wise.
func Atanh(scope *Scope, x tf.Output) (y tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Atanh",
		Input: []tf.Input{
			x,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Computes the reverse mode backpropagated gradient of the Cholesky algorithm.
//
// For an explanation see "Differentiation of the Cholesky algorithm" by
// Iain Murray http://arxiv.org/abs/1602.07527.
//
// Arguments:
//	l: Output of batch Cholesky algorithm l = cholesky(A). Shape is `[..., M, M]`.
// Algorithm depends only on lower triangular part of the innermost matrices of
// this tensor.
//	grad: df/dl where f is some scalar function. Shape is `[..., M, M]`.
// Algorithm depends only on lower triangular part of the innermost matrices of
// this tensor.
//
// Returns Symmetrized version of df/dA. Shape is `[..., M, M]`.
func CholeskyGrad(scope *Scope, l tf.Output, grad tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "CholeskyGrad",
		Input: []tf.Input{
			l, grad,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Assigns a new value to a variable.
//
// Any ReadVariableOp with a control dependency on this op is guaranteed to return
// this value or a subsequent newer value of the variable.
//
// Arguments:
//	resource: handle to the resource in which to store the variable.
//	value: the value to set the new tensor to use.
//
// Returns the created operation.
func AssignVariableOp(scope *Scope, resource tf.Output, value tf.Output) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "AssignVariableOp",
		Input: []tf.Input{
			resource, value,
		},
	}
	return scope.AddOperation(opspec)
}

// Returns a tensor of ones with the same shape and type as x.
//
// Arguments:
//	x: a tensor of type T.
//
// Returns a tensor of the same shape and type as x but filled with ones.
func OnesLike(scope *Scope, x tf.Output) (y tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "OnesLike",
		Input: []tf.Input{
			x,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// The gradient of SparseFillEmptyRows.
//
// Takes vectors reverse_index_map, shaped `[N]`, and grad_values,
// shaped `[N_full]`, where `N_full >= N` and copies data into either
// `d_values` or `d_default_value`.  Here `d_values` is shaped `[N]` and
// `d_default_value` is a scalar.
//
//   d_values[j] = grad_values[reverse_index_map[j]]
//   d_default_value = sum_{k : 0 .. N_full - 1} (
//      grad_values[k] * 1{k not in reverse_index_map})
//
// Arguments:
//	reverse_index_map: 1-D.  The reverse index map from SparseFillEmptyRows.
//	grad_values: 1-D.  The gradients from backprop.
//
// Returns 1-D.  The backprop into values.
// 0-D.  The backprop into default_value.
func SparseFillEmptyRowsGrad(scope *Scope, reverse_index_map tf.Output, grad_values tf.Output) (d_values tf.Output, d_default_value tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "SparseFillEmptyRowsGrad",
		Input: []tf.Input{
			reverse_index_map, grad_values,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1)
}

// Creates a dataset that zips together `input_datasets`.
func ZipDataset(scope *Scope, input_datasets []tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
	opspec := tf.OpSpec{
		Type: "ZipDataset",
		Input: []tf.Input{
			tf.OutputList(input_datasets),
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// LoadTPUEmbeddingAdagradParametersAttr is an optional argument to LoadTPUEmbeddingAdagradParameters.
type LoadTPUEmbeddingAdagradParametersAttr func(optionalAttr)

// LoadTPUEmbeddingAdagradParametersTableId sets the optional table_id attribute to value.
// If not specified, defaults to -1
//
// REQUIRES: value >= -1
func LoadTPUEmbeddingAdagradParametersTableId(value int64) LoadTPUEmbeddingAdagradParametersAttr {
	return func(m optionalAttr) {
		m["table_id"] = value
	}
}

// LoadTPUEmbeddingAdagradParametersTableName sets the optional table_name attribute to value.
// If not specified, defaults to ""
func LoadTPUEmbeddingAdagradParametersTableName(value string) LoadTPUEmbeddingAdagradParametersAttr {
	return func(m optionalAttr) {
		m["table_name"] = value
	}
}

// Load Adagrad embedding parameters.
//
// An op that loads optimization parameters into HBM for embedding. Must be
// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
// embedding table configuration. For example, this op is used to install
// parameters that are loaded from a checkpoint before a training loop is
// executed.
//
// Arguments:
//	parameters: Value of parameters used in the Adagrad optimization algorithm.
//	accumulators: Value of accumulators used in the Adagrad optimization algorithm.
//
//
//
// Returns the created operation.
func LoadTPUEmbeddingAdagradParameters(scope *Scope, parameters tf.Output, accumulators tf.Output, num_shards int64, shard_id int64, optional ...LoadTPUEmbeddingAdagradParametersAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "LoadTPUEmbeddingAdagradParameters",
		Input: []tf.Input{
			parameters, accumulators,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}

// Strip leading and trailing whitespace from the Tensor.
//
// Arguments:
//	input: A string `Tensor` of any shape.
//
// Returns A string `Tensor` of the same shape as the input.
func StringStrip(scope *Scope, input tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "StringStrip",
		Input: []tf.Input{
			input,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Converts each string in the input Tensor to its hash mod by a number of buckets.
//
// The hash function is deterministic on the content of the string within the
// process. The hash function is a keyed hash function, where attribute `key`
// defines the key of the hash function. `key` is an array of 2 elements.
//
// A strong hash is important when inputs may be malicious, e.g. URLs with
// additional components. Adversaries could try to make their inputs hash to the
// same bucket for a denial-of-service attack or to skew the results. A strong
// hash prevents this by making it difficult, if not infeasible, to compute inputs
// that hash to the same bucket. This comes at a cost of roughly 4x higher compute
// time than `tf.string_to_hash_bucket_fast`.
//
// Arguments:
//	input: The strings to assign a hash bucket.
//	num_buckets: The number of buckets.
//	key: The key for the keyed hash function passed as a list of two uint64
// elements.
//
// Returns A Tensor of the same shape as the input `string_tensor`.
func StringToHashBucketStrong(scope *Scope, input tf.Output, num_buckets int64, key []int64) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_buckets": num_buckets, "key": key}
	opspec := tf.OpSpec{
		Type: "StringToHashBucketStrong",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

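// A hedged sketch (same assumed client setup as above): hashing strings into
// 1024 buckets with a fixed 128-bit key, passed as two uint64 halves. The key
// values here are illustrative only.
//
//	s := op.NewScope()
//	words := op.Const(s, []string{"alpha", "beta"})
//	buckets := op.StringToHashBucketStrong(s, words, 1024, []int64{1, 2})
//	// `buckets` holds int64 bucket ids in [0, 1024); identical inputs map to
//	// identical ids within a process for a given key.
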
// StringLengthAttr is an optional argument to StringLength.
type StringLengthAttr func(optionalAttr)

// StringLengthUnit sets the optional unit attribute to value.
//
// value: The unit that is counted to compute string length.  One of: `"BYTE"` (for
// the number of bytes in each string) or `"UTF8_CHAR"` (for the number of UTF-8
// encoded Unicode code points in each string).  Results are undefined
// if `unit=UTF8_CHAR` and the `input` strings do not contain structurally
// valid UTF-8.
// If not specified, defaults to "BYTE"
func StringLengthUnit(value string) StringLengthAttr {
	return func(m optionalAttr) {
		m["unit"] = value
	}
}

// String lengths of `input`.
//
// Computes the length of each string given in the input tensor.
//
// Arguments:
//	input: The string for which to compute the length.
//
// Returns Integer tensor that has the same shape as `input`. The output contains the
// element-wise string lengths of `input`.
func StringLength(scope *Scope, input tf.Output, optional ...StringLengthAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "StringLength",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

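// A hedged sketch (same assumed client setup as above): counting UTF-8 code
// points rather than bytes.
//
//	s := op.NewScope()
//	strs := op.Const(s, []string{"go", "héllo"})
//	n := op.StringLength(s, strs, op.StringLengthUnit("UTF8_CHAR"))
//	// Expected result: [2, 5]. With the default "BYTE" unit the second entry
//	// would be 6, since "é" occupies two bytes in UTF-8.
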
// Performs gradient updates of embedding tables.
//
// Arguments:
//	inputs: A TensorList of gradients with which to update embedding tables.
// This argument has the same length and shapes as the return value of
// RecvTPUEmbeddingActivations, but contains gradients of the model's loss
// with respect to the embedding activations. The embedding tables are updated
// from these gradients via the optimizer specified in the TPU embedding
// configuration given to tpu.initialize_system.
//	learning_rates: A TensorList of float32 scalars, one for each dynamic learning
// rate tag: see the comments in
// //third_party/tensorflow/core/protobuf/tpu/optimization_parameters.proto.
// Multiple tables can share the same dynamic learning rate tag as specified
// in the configuration. If the learning rates for all tables are constant,
// this list should be empty.
//	config: Serialized TPUEmbeddingConfiguration proto.
//
// Returns the created operation.
func SendTPUEmbeddingGradients(scope *Scope, inputs []tf.Output, learning_rates []tf.Output, config string) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"config": config}
	opspec := tf.OpSpec{
		Type: "SendTPUEmbeddingGradients",
		Input: []tf.Input{
			tf.OutputList(inputs), tf.OutputList(learning_rates),
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}

// Computes numerical negative value element-wise.
//
// I.e., \\(y = -x\\).
func Neg(scope *Scope, x tf.Output) (y tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Neg",
		Input: []tf.Input{
			x,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Receives a tensor value broadcast from another device.
func CollectiveBcastRecv(scope *Scope, T tf.DataType, group_size int64, group_key int64, instance_key int64, shape tf.Shape) (data tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"T": T, "group_size": group_size, "group_key": group_key, "instance_key": instance_key, "shape": shape}
	opspec := tf.OpSpec{
		Type: "CollectiveBcastRecv",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Decode web-safe base64-encoded strings.
//
// Input may or may not have padding at the end. See EncodeBase64 for padding.
// Web-safe means that input must use - and _ instead of + and /.
//
// Arguments:
//	input: Base64 strings to decode.
//
// Returns Decoded strings.
func DecodeBase64(scope *Scope, input tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "DecodeBase64",
		Input: []tf.Input{
			input,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

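// A hedged sketch (same assumed client setup as above): decoding a web-safe
// base64 string such as one produced by EncodeBase64 or by Go's
// base64.URLEncoding.
//
//	s := op.NewScope()
//	enc := op.Const(s, "aGVsbG8") // "hello" in unpadded web-safe base64
//	dec := op.DecodeBase64(s, enc)
//	// Fetching `dec` is expected to yield the scalar string "hello"; per the
//	// doc comment above, trailing padding is optional.
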
// SubstrAttr is an optional argument to Substr.
type SubstrAttr func(optionalAttr)

// SubstrUnit sets the optional unit attribute to value.
//
// value: The unit that is used to create the substring.  One of: `"BYTE"` (for
// defining position and length by bytes) or `"UTF8_CHAR"` (for the UTF-8
// encoded Unicode code points).  The default is `"BYTE"`. Results are undefined if
// `unit=UTF8_CHAR` and the `input` strings do not contain structurally valid
// UTF-8.
// If not specified, defaults to "BYTE"
func SubstrUnit(value string) SubstrAttr {
	return func(m optionalAttr) {
		m["unit"] = value
	}
}

// Return substrings from `Tensor` of strings.
//
// For each string in the input `Tensor`, creates a substring starting at index
// `pos` with a total length of `len`.
//
// If `len` defines a substring that would extend beyond the length of the input
// string, then as many characters as possible are used.
//
// A negative `pos` indicates distance within the string backwards from the end.
//
// If `pos` specifies an index which is out of range for any of the input strings,
// then an `InvalidArgumentError` is thrown.
//
// `pos` and `len` must have the same shape, otherwise a `ValueError` is thrown on
// Op creation.
//
// *NOTE*: `Substr` supports broadcasting up to two dimensions. More about
// broadcasting
// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
//
// ---
//
// Examples
//
// Using scalar `pos` and `len`:
//
// ```python
// input = [b'Hello', b'World']
// position = 1
// length = 3
//
// output = [b'ell', b'orl']
// ```
//
// Using `pos` and `len` with same shape as `input`:
//
// ```python
// input = [[b'ten', b'eleven', b'twelve'],
//          [b'thirteen', b'fourteen', b'fifteen'],
//          [b'sixteen', b'seventeen', b'eighteen']]
// position = [[1, 2, 3],
//             [1, 2, 3],
//             [1, 2, 3]]
// length =   [[2, 3, 4],
//             [4, 3, 2],
//             [5, 5, 5]]
//
// output = [[b'en', b'eve', b'lve'],
//           [b'hirt', b'urt', b'te'],
//           [b'ixtee', b'vente', b'hteen']]
// ```
//
// Broadcasting `pos` and `len` onto `input`:
//
// ```python
// input = [[b'ten', b'eleven', b'twelve'],
//          [b'thirteen', b'fourteen', b'fifteen'],
//          [b'sixteen', b'seventeen', b'eighteen'],
//          [b'nineteen', b'twenty', b'twentyone']]
// position = [1, 2, 3]
// length =   [1, 2, 3]
//
// output = [[b'e', b'ev', b'lve'],
//           [b'h', b'ur', b'tee'],
//           [b'i', b've', b'hte'],
//           [b'i', b'en', b'nty']]
// ```
//
// Broadcasting `input` onto `pos` and `len`:
//
// ```python
// input = b'thirteen'
// position = [1, 5, 7]
// length =   [3, 2, 1]
//
// output = [b'hir', b'ee', b'n']
// ```
//
// Arguments:
//	input: Tensor of strings
//	pos: Scalar defining the position of first character in each substring
//	len: Scalar defining the number of characters to include in each substring
//
// Returns Tensor of substrings
func Substr(scope *Scope, input tf.Output, pos tf.Output, len tf.Output, optional ...SubstrAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "Substr",
		Input: []tf.Input{
			input, pos, len,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

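// A hedged Go sketch complementing the Python examples above (same assumed
// client setup): taking a 3-character substring starting at index 1 from each
// element.
//
//	s := op.NewScope()
//	in := op.Const(s, []string{"Hello", "World"})
//	pos := op.Const(s, int32(1))
//	length := op.Const(s, int32(3))
//	sub := op.Substr(s, in, pos, length)
//	// Expected result: ["ell", "orl"], matching the first example above.
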
// Exits the current frame to its parent frame.
//
// Exit makes its input `data` available to the parent frame.
//
// Arguments:
//	data: The tensor to be made available to the parent frame.
//
// Returns The same tensor as `data`.
func Exit(scope *Scope, data tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Exit",
		Input: []tf.Input{
			data,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// RetrieveTPUEmbeddingProximalAdagradParametersAttr is an optional argument to RetrieveTPUEmbeddingProximalAdagradParameters.
type RetrieveTPUEmbeddingProximalAdagradParametersAttr func(optionalAttr)

// RetrieveTPUEmbeddingProximalAdagradParametersTableId sets the optional table_id attribute to value.
// If not specified, defaults to -1
//
// REQUIRES: value >= -1
func RetrieveTPUEmbeddingProximalAdagradParametersTableId(value int64) RetrieveTPUEmbeddingProximalAdagradParametersAttr {
	return func(m optionalAttr) {
		m["table_id"] = value
	}
}

// RetrieveTPUEmbeddingProximalAdagradParametersTableName sets the optional table_name attribute to value.
// If not specified, defaults to ""
func RetrieveTPUEmbeddingProximalAdagradParametersTableName(value string) RetrieveTPUEmbeddingProximalAdagradParametersAttr {
	return func(m optionalAttr) {
		m["table_name"] = value
	}
}

// Retrieve proximal Adagrad embedding parameters.
//
// An op that retrieves optimization parameters from embedding to host
// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
// the correct embedding table configuration. For example, this op is
// used to retrieve updated parameters before saving a checkpoint.
//
// Returns Parameter parameters updated by the proximal Adagrad optimization algorithm.
// Parameter accumulators updated by the proximal Adagrad optimization algorithm.
func RetrieveTPUEmbeddingProximalAdagradParameters(scope *Scope, num_shards int64, shard_id int64, optional ...RetrieveTPUEmbeddingProximalAdagradParametersAttr) (parameters tf.Output, accumulators tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "RetrieveTPUEmbeddingProximalAdagradParameters",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1)
}

// Produce a string tensor that encodes the state of a Reader.
//
// Not all Readers support being serialized, so this can produce an
// Unimplemented error.
//
// Arguments:
//	reader_handle: Handle to a Reader.
func ReaderSerializeStateV2(scope *Scope, reader_handle tf.Output) (state tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "ReaderSerializeStateV2",
		Input: []tf.Input{
			reader_handle,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Returns the number of tensors in the input tensor list.
//
// input_handle: the input list
// length: the number of tensors in the list
func TensorListLength(scope *Scope, input_handle tf.Output) (length tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "TensorListLength",
		Input: []tf.Input{
			input_handle,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Creates a dataset with a range of values. Corresponds to python's xrange.
//
// Arguments:
//	start: corresponds to start in python's xrange().
//	stop: corresponds to stop in python's xrange().
//	step: corresponds to step in python's xrange().
//
//
func RangeDataset(scope *Scope, start tf.Output, stop tf.Output, step tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
	opspec := tf.OpSpec{
		Type: "RangeDataset",
		Input: []tf.Input{
			start, stop, step,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Computes inverse hyperbolic sine of x element-wise.
func Asinh(scope *Scope, x tf.Output) (y tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Asinh",
		Input: []tf.Input{
			x,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// UnicodeTranscodeAttr is an optional argument to UnicodeTranscode.
type UnicodeTranscodeAttr func(optionalAttr)

// UnicodeTranscodeErrors sets the optional errors attribute to value.
//
// value: Error handling policy when there is invalid formatting found in the input.
// The value of 'strict' will cause the operation to produce a InvalidArgument
// error on any invalid input formatting. A value of 'replace' (the default) will
// cause the operation to replace any invalid formatting in the input with the
// `replacement_char` codepoint. A value of 'ignore' will cause the operation to
// skip any invalid formatting in the input and produce no corresponding output
// character.
// If not specified, defaults to "replace"
func UnicodeTranscodeErrors(value string) UnicodeTranscodeAttr {
	return func(m optionalAttr) {
		m["errors"] = value
	}
}

// UnicodeTranscodeReplacementChar sets the optional replacement_char attribute to value.
//
// value: The replacement character codepoint to be used in place of any invalid
// formatting in the input when `errors='replace'`. Any valid unicode codepoint may
// be used. The default value is the default unicode replacement character,
// 0xFFFD (decimal 65533).
//
// Note that for UTF-8, passing a replacement character expressible in 1 byte, such
// as ' ', will preserve string alignment to the source since invalid bytes will be
// replaced with a 1-byte replacement. For UTF-16-BE and UTF-16-LE, any 1 or 2 byte
// replacement character will preserve byte alignment to the source.
// If not specified, defaults to 65533
func UnicodeTranscodeReplacementChar(value int64) UnicodeTranscodeAttr {
	return func(m optionalAttr) {
		m["replacement_char"] = value
	}
}

// UnicodeTranscodeReplaceControlCharacters sets the optional replace_control_characters attribute to value.
//
// value: Whether to replace the C0 control characters (00-1F) with the
// `replacement_char`. Default is false.
// If not specified, defaults to false
func UnicodeTranscodeReplaceControlCharacters(value bool) UnicodeTranscodeAttr {
	return func(m optionalAttr) {
		m["replace_control_characters"] = value
	}
}

// Transcode the input text from a source encoding to a destination encoding.
//
// The input is a string tensor of any shape. The output is a string tensor of
// the same shape containing the transcoded strings. Output strings are always
// valid unicode. If the input contains invalid encoding positions, the
// `errors` attribute sets the policy for how to deal with them. If the default
// error-handling policy is used, invalid formatting will be substituted in the
// output by the `replacement_char`. If the errors policy is to `ignore`, any
// invalid encoding positions in the input are skipped and not included in the
// output. If it is set to `strict` then any invalid formatting will result in an
// InvalidArgument error.
//
// This operation can be used with `output_encoding = input_encoding` to enforce
// correct formatting for inputs even if they are already in the desired encoding.
//
// If the input is prefixed by a Byte Order Mark needed to determine encoding
// (e.g. if the encoding is UTF-16 and the BOM indicates big-endian), then that
// BOM will be consumed and not emitted into the output. If the input encoding
// is marked with an explicit endianness (e.g. UTF-16-BE), then the BOM is
// interpreted as a non-breaking-space and is preserved in the output (including
// always for UTF-8).
//
// The end result is that if the input is marked as an explicit endianness the
// transcoding is faithful to all codepoints in the source. If it is not marked
// with an explicit endianness, the BOM is not considered part of the string itself
// but as metadata, and so is not preserved in the output.
//
// Arguments:
//	input: The text to be processed. Can have any shape.
//	input_encoding: Text encoding of the input strings. This is any of the encodings supported
// by ICU ucnv algorithmic converters. Examples: `"UTF-16", "US ASCII", "UTF-8"`.
//	output_encoding: The unicode encoding to use in the output. Must be one of
// `"UTF-8", "UTF-16-BE", "UTF-32-BE"`. Multi-byte encodings will be big-endian.
//
// Returns A string tensor containing unicode text encoded using `output_encoding`.
func UnicodeTranscode(scope *Scope, input tf.Output, input_encoding string, output_encoding string, optional ...UnicodeTranscodeAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"input_encoding": input_encoding, "output_encoding": output_encoding}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "UnicodeTranscode",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

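// A hedged sketch (same assumed client setup as above): transcoding UTF-8
// input to UTF-16-BE, replacing any malformed bytes with U+FFFD.
//
//	s := op.NewScope()
//	in := op.Const(s, []string{"héllo"})
//	out := op.UnicodeTranscode(s, in, "UTF-8", "UTF-16-BE",
//		op.UnicodeTranscodeErrors("replace"))
//	// Each output element is the UTF-16-BE byte sequence for the input text.
//	// With output_encoding equal to input_encoding, the same op can instead
//	// be used purely to sanitize invalid formatting, as noted above.
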
// ResourceApplyRMSPropAttr is an optional argument to ResourceApplyRMSProp.
type ResourceApplyRMSPropAttr func(optionalAttr)

// ResourceApplyRMSPropUseLocking sets the optional use_locking attribute to value.
//
// value: If `True`, updating of the var, ms, and mom tensors is protected
// by a lock; otherwise the behavior is undefined, but may exhibit less
// contention.
// If not specified, defaults to false
func ResourceApplyRMSPropUseLocking(value bool) ResourceApplyRMSPropAttr {
	return func(m optionalAttr) {
		m["use_locking"] = value
	}
}

// Update '*var' according to the RMSProp algorithm.
//
// Note that in the dense implementation of this algorithm, ms and mom will
// update even if the grad is zero, but in the sparse implementation, ms
// and mom will not update in iterations during which the grad is zero.
//
// mean_square = decay * mean_square + (1-decay) * gradient ** 2
// Delta = learning_rate * gradient / sqrt(mean_square + epsilon)
//
// ms <- rho * ms_{t-1} + (1-rho) * grad * grad
// mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)
// var <- var - mom
//
// Arguments:
//	var_: Should be from a Variable().
//	ms: Should be from a Variable().
//	mom: Should be from a Variable().
//	lr: Scaling factor. Must be a scalar.
//	rho: Decay rate. Must be a scalar.
//	momentum: Momentum. Must be a scalar.
//	epsilon: Ridge term. Must be a scalar.
//	grad: The gradient.
//
// Returns the created operation.
func ResourceApplyRMSProp(scope *Scope, var_ tf.Output, ms tf.Output, mom tf.Output, lr tf.Output, rho tf.Output, momentum tf.Output, epsilon tf.Output, grad tf.Output, optional ...ResourceApplyRMSPropAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ResourceApplyRMSProp",
		Input: []tf.Input{
			var_, ms, mom, lr, rho, momentum, epsilon, grad,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}

// StatelessTruncatedNormalAttr is an optional argument to StatelessTruncatedNormal.
type StatelessTruncatedNormalAttr func(optionalAttr)

// StatelessTruncatedNormalDtype sets the optional dtype attribute to value.
//
// value: The type of the output.
// If not specified, defaults to DT_FLOAT
func StatelessTruncatedNormalDtype(value tf.DataType) StatelessTruncatedNormalAttr {
	return func(m optionalAttr) {
		m["dtype"] = value
	}
}

// Outputs deterministic pseudorandom values from a truncated normal distribution.
//
// The generated values follow a normal distribution with mean 0 and standard
// deviation 1, except that values whose magnitude is more than 2 standard
// deviations from the mean are dropped and re-picked.
//
// The outputs are a deterministic function of `shape` and `seed`.
//
// Arguments:
//	shape: The shape of the output tensor.
//	seed: 2 seeds (shape [2]).
//
// Returns Random values with specified shape.
func StatelessTruncatedNormal(scope *Scope, shape tf.Output, seed tf.Output, optional ...StatelessTruncatedNormalAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "StatelessTruncatedNormal",
		Input: []tf.Input{
			shape, seed,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

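// A hedged sketch (same assumed client setup as above): drawing a
// deterministic [2, 3] sample. Re-running with the same shape and seed
// always produces the same values.
//
//	s := op.NewScope()
//	shape := op.Const(s, []int32{2, 3})
//	seed := op.Const(s, []int64{7, 11}) // 2 seeds (shape [2])
//	vals := op.StatelessTruncatedNormal(s, shape, seed)
//	// `vals` is float32 by default; magnitudes beyond 2 standard deviations
//	// are dropped and re-picked, per the op documentation above.
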
// RestoreSliceAttr is an optional argument to RestoreSlice.
type RestoreSliceAttr func(optionalAttr)

// RestoreSlicePreferredShard sets the optional preferred_shard attribute to value.
//
// value: Index of file to open first if multiple files match
// `file_pattern`. See the documentation for `Restore`.
// If not specified, defaults to -1
func RestoreSlicePreferredShard(value int64) RestoreSliceAttr {
	return func(m optionalAttr) {
		m["preferred_shard"] = value
	}
}

// Restores a tensor from checkpoint files.
//
// This is like `Restore` except that the restored tensor can be listed as filling
// only a slice of a larger tensor.  `shape_and_slice` specifies the shape of the
// larger tensor and the slice that the restored tensor covers.
//
// The `shape_and_slice` input has the same format as the
// elements of the `shapes_and_slices` input of the `SaveSlices` op.
//
// Arguments:
//	file_pattern: Must have a single element. The pattern of the files from
// which we read the tensor.
//	tensor_name: Must have a single element. The name of the tensor to be
// restored.
//	shape_and_slice: Scalar. The shapes and slice specifications to use when
// restoring a tensor.
//	dt: The type of the tensor to be restored.
//
// Returns The restored tensor.
func RestoreSlice(scope *Scope, file_pattern tf.Output, tensor_name tf.Output, shape_and_slice tf.Output, dt tf.DataType, optional ...RestoreSliceAttr) (tensor tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"dt": dt}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "RestoreSlice",
		Input: []tf.Input{
			file_pattern, tensor_name, shape_and_slice,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Convert the quantized 'input' tensor into a lower-precision 'output'.
//
// Uses the actual distribution of the values to maximize the usage of the lower
// bit depth, adjusting the output min and max ranges accordingly.
//
// [input_min, input_max] are scalar floats that specify the range for the float
// interpretation of the 'input' data. For example, if input_min is -1.0f and
// input_max is 1.0f, and we are dealing with quint16 quantized data, then a 0
// value in the 16-bit data should be interpreted as -1.0f, and a 65535 means 1.0f.
//
// This operator tries to squeeze as much precision as possible into an output with
// a lower bit depth by calculating the actual min and max values found in the
// data. For example, maybe that quint16 input has no values lower than 16,384 and
// none higher than 49,152. That means only half the range is actually needed, all
// the float interpretations are between -0.5f and 0.5f, so if we want to compress
// the data into a quint8 output, we can use that range rather than the theoretical
// -1.0f to 1.0f that is suggested by the input min and max.
//
// In practice, this is most useful for taking output from operations like
// QuantizedMatMul that can produce higher bit-depth outputs than their inputs and
// may have large potential output ranges, but in practice have a distribution of
// input values that only uses a small fraction of the possible range. By feeding
// that output into this operator, we can reduce it from 32 bits down to 8 with
// minimal loss of accuracy.
//
// Arguments:
//
//	input_min: The float value that the minimum quantized input value represents.
//	input_max: The float value that the maximum quantized input value represents.
//	out_type: The type of the output. Should be a lower bit depth than Tinput.
//
// Returns The float value that the minimum quantized output value represents.
// The float value that the maximum quantized output value represents.
func QuantizeDownAndShrinkRange(scope *Scope, input tf.Output, input_min tf.Output, input_max tf.Output, out_type tf.DataType) (output tf.Output, output_min tf.Output, output_max tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"out_type": out_type}
	opspec := tf.OpSpec{
		Type: "QuantizeDownAndShrinkRange",
		Input: []tf.Input{
			input, input_min, input_max,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}

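// A hedged sketch (same assumed client setup as above): requantizing a qint32
// tensor, such as the accumulator output of a quantized matmul, down to quint8
// while shrinking the represented float range to the values actually present.
// The variables `s`, `acc`, `accMin`, and `accMax` are hypothetical and stand
// in for a scope and the three outputs of an earlier quantized op.
//
//	out, outMin, outMax := op.QuantizeDownAndShrinkRange(
//		s, acc, accMin, accMax, tf.Quint8)
//	// Following the numeric example above: if the input data only spans the
//	// middle half of [input_min, input_max], outMin and outMax tighten to
//	// that half, so the 8 output bits cover the live range rather than the
//	// theoretical one.
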
27798// RandomGammaAttr is an optional argument to RandomGamma.
27799type RandomGammaAttr func(optionalAttr)
27800
27801// RandomGammaSeed sets the optional seed attribute to value.
27802//
27803// value: If either `seed` or `seed2` are set to be non-zero, the random number
27804// generator is seeded by the given seed.  Otherwise, it is seeded by a
27805// random seed.
27806// If not specified, defaults to 0
27807func RandomGammaSeed(value int64) RandomGammaAttr {
27808	return func(m optionalAttr) {
27809		m["seed"] = value
27810	}
27811}
27812
27813// RandomGammaSeed2 sets the optional seed2 attribute to value.
27814//
27815// value: A second seed to avoid seed collision.
27816// If not specified, defaults to 0
27817func RandomGammaSeed2(value int64) RandomGammaAttr {
27818	return func(m optionalAttr) {
27819		m["seed2"] = value
27820	}
27821}
27822
27823// Outputs random values from the Gamma distribution(s) described by alpha.
27824//
27825// This op uses the algorithm by Marsaglia et al. to acquire samples via
27826// transformation-rejection from pairs of uniform and normal random variables.
27827// See http://dl.acm.org/citation.cfm?id=358414
27828//
27829// Arguments:
27830//	shape: 1-D integer tensor. Shape of independent samples to draw from each
27831// distribution described by the shape parameters given in alpha.
27832//	alpha: A tensor in which each scalar is a "shape" parameter describing the
27833// associated gamma distribution.
27834//
27835// Returns A tensor with shape `shape + shape(alpha)`. Each slice
27836// `[:, ..., :, i0, i1, ...iN]` contains the samples drawn for
27837// `alpha[i0, i1, ...iN]`. The dtype of the output matches the dtype of alpha.
27838func RandomGamma(scope *Scope, shape tf.Output, alpha tf.Output, optional ...RandomGammaAttr) (output tf.Output) {
27839	if scope.Err() != nil {
27840		return
27841	}
27842	attrs := map[string]interface{}{}
27843	for _, a := range optional {
27844		a(attrs)
27845	}
27846	opspec := tf.OpSpec{
27847		Type: "RandomGamma",
27848		Input: []tf.Input{
27849			shape, alpha,
27850		},
27851		Attrs: attrs,
27852	}
27853	op := scope.AddOperation(opspec)
27854	return op.Output(0)
27855}
27856
// ResourceScatterNdSubAttr is an optional argument to ResourceScatterNdSub.
type ResourceScatterNdSubAttr func(optionalAttr)

// ResourceScatterNdSubUseLocking sets the optional use_locking attribute to value.
//
// value: An optional bool. Defaults to True. If True, the assignment will
// be protected by a lock; otherwise the behavior is undefined,
// but may exhibit less contention.
// If not specified, defaults to true
func ResourceScatterNdSubUseLocking(value bool) ResourceScatterNdSubAttr {
	return func(m optionalAttr) {
		m["use_locking"] = value
	}
}

// Applies sparse subtraction to individual values or slices in a Variable.
//
// `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
//
// `indices` must be an integer tensor, containing indices into `ref`.
// It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
//
// The innermost dimension of `indices` (with length `K`) corresponds to
// indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
// dimension of `ref`.
//
// `updates` is a `Tensor` of rank `Q-1+P-K` with shape:
//
// ```
// [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]
// ```
//
// For example, say we want to subtract 4 scattered elements from a rank-1 tensor
// with 8 elements. In Python, that subtraction would look like this:
//
// ```python
// ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8], use_resource=True)
// indices = tf.constant([[4], [3], [1], [7]])
// updates = tf.constant([9, 10, 11, 12])
// sub = tf.scatter_nd_sub(ref, indices, updates)
// with tf.Session() as sess:
//   print(sess.run(sub))
// ```
//
// The resulting update to ref would look like this:
//
//     [1, -9, 3, -6, -4, 6, 7, -4]
//
// See `tf.scatter_nd` for more details about how to make updates to
// slices.
//
// Arguments:
//	ref: A resource handle. Must be from a VarHandleOp.
//	indices: A Tensor. Must be one of the following types: int32, int64.
// A tensor of indices into ref.
//	updates: A Tensor. Must have the same type as ref. A tensor of
// values to subtract from ref.
//
// Returns the created operation.
func ResourceScatterNdSub(scope *Scope, ref tf.Output, indices tf.Output, updates tf.Output, optional ...ResourceScatterNdSubAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ResourceScatterNdSub",
		Input: []tf.Input{
			ref, indices, updates,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}

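// A Go sketch of the Python example above (an editorial addition; the
// resource-variable plumbing via VarHandleOp, AssignVariableOp and
// ReadVariableOp follows this package's generated wrappers, and error
// handling is elided for brevity):
//
// ```go
// s := op.NewScope()
// ref := op.VarHandleOp(s, tf.Int32, tf.MakeShape(8))
// init := op.AssignVariableOp(s, ref,
// 	op.Const(s.SubScope("v"), []int32{1, 2, 3, 4, 5, 6, 7, 8}))
// indices := op.Const(s.SubScope("i"), [][]int32{{4}, {3}, {1}, {7}})
// updates := op.Const(s.SubScope("u"), []int32{9, 10, 11, 12})
// sub := op.ResourceScatterNdSub(s, ref, indices, updates)
// read := op.ReadVariableOp(s, ref, tf.Int32)
// graph, _ := s.Finalize()
// sess, _ := tf.NewSession(graph, nil)
// defer sess.Close()
// sess.Run(nil, nil, []*tf.Operation{init}) // initialize the variable
// sess.Run(nil, nil, []*tf.Operation{sub})  // apply the scatter subtraction
// out, _ := sess.Run(nil, []tf.Output{read}, nil)
// fmt.Println(out[0].Value()) // [1 -9 3 -6 -4 6 7 -4]
// ```
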
// Outputs deterministic pseudorandom integers from a uniform distribution.
//
// The generated values follow a uniform distribution in the range `[minval, maxval)`.
//
// The outputs are a deterministic function of `shape`, `seed`, `minval`, and `maxval`.
//
// Arguments:
//	shape: The shape of the output tensor.
//	seed: 2 seeds (shape [2]).
//	minval: Minimum value (inclusive, scalar).
//	maxval: Maximum value (exclusive, scalar).
//
// Returns Random values with specified shape.
func StatelessRandomUniformInt(scope *Scope, shape tf.Output, seed tf.Output, minval tf.Output, maxval tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "StatelessRandomUniformInt",
		Input: []tf.Input{
			shape, seed, minval, maxval,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

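// A brief sketch (editorial addition): because the seed is an ordinary `[2]`
// tensor input rather than graph state, the same seed always produces the
// same integers.
//
// ```go
// s := op.NewScope()
// shape := op.Const(s.SubScope("shape"), []int32{4})
// seed := op.Const(s.SubScope("seed"), []int64{7, 11})
// minval := op.Const(s.SubScope("min"), int64(0))
// maxval := op.Const(s.SubScope("max"), int64(100))
// out := op.StatelessRandomUniformInt(s, shape, seed, minval, maxval)
// // Fetching `out` twice, even in separate sessions, yields the same
// // four integers in [0, 100).
// ```
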
// QuantizedConv2DAttr is an optional argument to QuantizedConv2D.
type QuantizedConv2DAttr func(optionalAttr)

// QuantizedConv2DOutType sets the optional out_type attribute to value.
// If not specified, defaults to DT_QINT32
func QuantizedConv2DOutType(value tf.DataType) QuantizedConv2DAttr {
	return func(m optionalAttr) {
		m["out_type"] = value
	}
}

// QuantizedConv2DDilations sets the optional dilations attribute to value.
//
// value: 1-D tensor of length 4.  The dilation factor for each dimension of
// `input`. If set to k > 1, there will be k-1 skipped cells between each
// filter element on that dimension. The dimension order is determined by the
// value of `data_format`, see above for details. Dilations in the batch and
// depth dimensions must be 1.
// If not specified, defaults to [1, 1, 1, 1]
func QuantizedConv2DDilations(value []int64) QuantizedConv2DAttr {
	return func(m optionalAttr) {
		m["dilations"] = value
	}
}

// Computes a 2D convolution given quantized 4D input and filter tensors.
//
// The inputs are quantized tensors where the lowest quantized value maps to the
// real number given by the associated minimum, and the highest maps to the
// associated maximum. The quantized output must be interpreted the same way,
// using the returned minimum and maximum values.
//
// Arguments:
//
//	filter: filter's input_depth dimension must match input's depth dimension.
//	min_input: The float value that the lowest quantized input value represents.
//	max_input: The float value that the highest quantized input value represents.
//	min_filter: The float value that the lowest quantized filter value represents.
//	max_filter: The float value that the highest quantized filter value represents.
//	strides: The stride of the sliding window for each dimension of the input
// tensor.
//	padding: The type of padding algorithm to use.
//
// Returns the output tensor, the float value that the lowest quantized output
// value represents, and the float value that the highest quantized output value
// represents.
func QuantizedConv2D(scope *Scope, input tf.Output, filter tf.Output, min_input tf.Output, max_input tf.Output, min_filter tf.Output, max_filter tf.Output, strides []int64, padding string, optional ...QuantizedConv2DAttr) (output tf.Output, min_output tf.Output, max_output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"strides": strides, "padding": padding}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "QuantizedConv2D",
		Input: []tf.Input{
			input, filter, min_input, max_input, min_filter, max_filter,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}

// ResourceGatherAttr is an optional argument to ResourceGather.
type ResourceGatherAttr func(optionalAttr)

// ResourceGatherBatchDims sets the optional batch_dims attribute to value.
// If not specified, defaults to 0
func ResourceGatherBatchDims(value int64) ResourceGatherAttr {
	return func(m optionalAttr) {
		m["batch_dims"] = value
	}
}

// ResourceGatherValidateIndices sets the optional validate_indices attribute to value.
// If not specified, defaults to true
func ResourceGatherValidateIndices(value bool) ResourceGatherAttr {
	return func(m optionalAttr) {
		m["validate_indices"] = value
	}
}

// Gather slices from the variable pointed to by `resource` according to `indices`.
//
// `indices` must be an integer tensor of any dimension (usually 0-D or 1-D).
// Produces an output tensor with shape `indices.shape + params.shape[1:]` where:
//
// ```python
//     # Scalar indices
//     output[:, ..., :] = params[indices, :, ... :]
//
//     # Vector indices
//     output[i, :, ..., :] = params[indices[i], :, ... :]
//
//     # Higher rank indices
//     output[i, ..., j, :, ... :] = params[indices[i, ..., j], :, ..., :]
// ```
func ResourceGather(scope *Scope, resource tf.Output, indices tf.Output, dtype tf.DataType, optional ...ResourceGatherAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"dtype": dtype}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ResourceGather",
		Input: []tf.Input{
			resource, indices,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

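// A short sketch (editorial addition) gathering rows 2 and 0 from a resource
// variable holding a 3x2 float matrix; the variable setup mirrors the
// ResourceScatterNdSub sketch above:
//
// ```go
// s := op.NewScope()
// v := op.VarHandleOp(s, tf.Float, tf.MakeShape(3, 2))
// init := op.AssignVariableOp(s, v,
// 	op.Const(s.SubScope("v"), [][]float32{{1, 2}, {3, 4}, {5, 6}}))
// idx := op.Const(s.SubScope("i"), []int32{2, 0})
// rows := op.ResourceGather(s, v, idx, tf.Float)
// // After running `init`, fetching `rows` yields [[5 6] [1 2]]:
// // output shape is indices.shape + params.shape[1:] = [2, 2].
// ```
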
// StatelessMultinomialAttr is an optional argument to StatelessMultinomial.
type StatelessMultinomialAttr func(optionalAttr)

// StatelessMultinomialOutputDtype sets the optional output_dtype attribute to value.
// If not specified, defaults to DT_INT64
func StatelessMultinomialOutputDtype(value tf.DataType) StatelessMultinomialAttr {
	return func(m optionalAttr) {
		m["output_dtype"] = value
	}
}

// Draws samples from a multinomial distribution.
//
// Arguments:
//	logits: 2-D Tensor with shape `[batch_size, num_classes]`.  Each slice `[i, :]`
// represents the unnormalized log probabilities for all classes.
//	num_samples: 0-D.  Number of independent samples to draw for each row slice.
//	seed: 2 seeds (shape [2]).
//
// Returns 2-D Tensor with shape `[batch_size, num_samples]`.  Each slice `[i, :]`
// contains the drawn class labels with range `[0, num_classes)`.
func StatelessMultinomial(scope *Scope, logits tf.Output, num_samples tf.Output, seed tf.Output, optional ...StatelessMultinomialAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "StatelessMultinomial",
		Input: []tf.Input{
			logits, num_samples, seed,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

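// A compact sketch (editorial addition): one row of unnormalized
// log-probabilities, five deterministic draws.
//
// ```go
// s := op.NewScope()
// logits := op.Const(s.SubScope("logits"), [][]float32{{0, 1, 2}})
// num := op.Const(s.SubScope("n"), int32(5))
// seed := op.Const(s.SubScope("seed"), []int64{1, 2})
// labels := op.StatelessMultinomial(s, logits, num, seed)
// // `labels` has shape [1, 5]; each entry is a class id in [0, 3), and
// // reruns with the same seed reproduce the same draws.
// ```
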
// Returns a batched matrix tensor with new batched diagonal values.
//
// Given `input` and `diagonal`, this operation returns a tensor with the
// same shape and values as `input`, except for the main diagonal of the
// innermost matrices.  These will be overwritten by the values in `diagonal`.
//
// The output is computed as follows:
//
// Assume `input` has `k+1` dimensions `[I, J, K, ..., M, N]` and `diagonal` has
// `k` dimensions `[I, J, K, ..., min(M, N)]`.  Then the output is a
// tensor of rank `k+1` with dimensions `[I, J, K, ..., M, N]` where:
//
//   * `output[i, j, k, ..., m, n] = diagonal[i, j, k, ..., n]` for `m == n`.
//   * `output[i, j, k, ..., m, n] = input[i, j, k, ..., m, n]` for `m != n`.
//
// Arguments:
//	input: Rank `k+1`, where `k >= 1`.
//	diagonal: Rank `k`, where `k >= 1`.
//
// Returns Rank `k+1`, with `output.shape = input.shape`.
func MatrixSetDiag(scope *Scope, input tf.Output, diagonal tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "MatrixSetDiag",
		Input: []tf.Input{
			input, diagonal,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

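// A quick sketch (editorial addition) overwriting the main diagonal of a
// single 3x3 matrix:
//
// ```go
// s := op.NewScope()
// m := op.Const(s.SubScope("m"), [][]float32{{1, 1, 1}, {1, 1, 1}, {1, 1, 1}})
// d := op.Const(s.SubScope("d"), []float32{7, 8, 9})
// out := op.MatrixSetDiag(s, m, d)
// // Fetching `out` yields [[7 1 1] [1 8 1] [1 1 9]].
// ```
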
// Returns the element-wise max of two SparseTensors.
//
// Assumes the two SparseTensors have the same shape, i.e., no broadcasting.
//
// Arguments:
//	a_indices: 2-D.  `N x R` matrix with the indices of non-empty values in a
// SparseTensor, in the canonical lexicographic ordering.
//	a_values: 1-D.  `N` non-empty values corresponding to `a_indices`.
//	a_shape: 1-D.  Shape of the input SparseTensor.
//	b_indices: counterpart to `a_indices` for the other operand.
//	b_values: counterpart to `a_values` for the other operand; must be of the same dtype.
//	b_shape: counterpart to `a_shape` for the other operand; the two shapes must be equal.
//
// Returns the 2-D indices of the output SparseTensor, and the 1-D values of the
// output SparseTensor.
func SparseSparseMaximum(scope *Scope, a_indices tf.Output, a_values tf.Output, a_shape tf.Output, b_indices tf.Output, b_values tf.Output, b_shape tf.Output) (output_indices tf.Output, output_values tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "SparseSparseMaximum",
		Input: []tf.Input{
			a_indices, a_values, a_shape, b_indices, b_values, b_shape,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1)
}

// LoadTPUEmbeddingMDLAdagradLightParametersAttr is an optional argument to LoadTPUEmbeddingMDLAdagradLightParameters.
type LoadTPUEmbeddingMDLAdagradLightParametersAttr func(optionalAttr)

// LoadTPUEmbeddingMDLAdagradLightParametersTableId sets the optional table_id attribute to value.
// If not specified, defaults to -1
//
// REQUIRES: value >= -1
func LoadTPUEmbeddingMDLAdagradLightParametersTableId(value int64) LoadTPUEmbeddingMDLAdagradLightParametersAttr {
	return func(m optionalAttr) {
		m["table_id"] = value
	}
}

// LoadTPUEmbeddingMDLAdagradLightParametersTableName sets the optional table_name attribute to value.
// If not specified, defaults to ""
func LoadTPUEmbeddingMDLAdagradLightParametersTableName(value string) LoadTPUEmbeddingMDLAdagradLightParametersAttr {
	return func(m optionalAttr) {
		m["table_name"] = value
	}
}

// Load MDL Adagrad Light embedding parameters.
//
// An op that loads optimization parameters into HBM for embedding. Must be
// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
// embedding table configuration. For example, this op is used to install
// parameters that are loaded from a checkpoint before a training loop is
// executed.
//
// Arguments:
//	parameters: Value of parameters used in the MDL Adagrad Light optimization algorithm.
//	accumulators: Value of accumulators used in the MDL Adagrad Light optimization algorithm.
//	weights: Value of weights used in the MDL Adagrad Light optimization algorithm.
//	benefits: Value of benefits used in the MDL Adagrad Light optimization algorithm.
//
//
//
// Returns the created operation.
func LoadTPUEmbeddingMDLAdagradLightParameters(scope *Scope, parameters tf.Output, accumulators tf.Output, weights tf.Output, benefits tf.Output, num_shards int64, shard_id int64, optional ...LoadTPUEmbeddingMDLAdagradLightParametersAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "LoadTPUEmbeddingMDLAdagradLightParameters",
		Input: []tf.Input{
			parameters, accumulators, weights, benefits,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}

// List of the given size with empty elements.
//
// element_shape: the shape of the future elements of the list
// num_elements: the number of elements to reserve
// handle: the output list
// element_dtype: the desired type of elements in the list.
func TensorListReserve(scope *Scope, element_shape tf.Output, num_elements tf.Output, element_dtype tf.DataType) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"element_dtype": element_dtype}
	opspec := tf.OpSpec{
		Type: "TensorListReserve",
		Input: []tf.Input{
			element_shape, num_elements,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

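// A small sketch (editorial addition): reserving a list of three scalar
// float elements. The returned handle is a variant tensor meant to be
// consumed by the other TensorList* wrappers in this package (for example
// TensorListSetItem or TensorListStack).
//
// ```go
// s := op.NewScope()
// elemShape := op.Const(s.SubScope("shape"), []int32{}) // scalar elements
// n := op.Const(s.SubScope("n"), int32(3))
// handle := op.TensorListReserve(s, elemShape, n, tf.Float)
// _ = handle
// ```
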
// Computes the gradient for the inverse of `x` w.r.t. its input.
//
// Specifically, `grad = -dy * y*y`, where `y = 1/x`, and `dy`
// is the corresponding input gradient.
func InvGrad(scope *Scope, y tf.Output, dy tf.Output) (z tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "InvGrad",
		Input: []tf.Input{
			y, dy,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Reduces sparse updates into the variable referenced by `resource` using the `min` operation.
//
// This operation computes
//
//     # Scalar indices
//     ref[indices, ...] = min(ref[indices, ...], updates[...])
//
//     # Vector indices (for each i)
//     ref[indices[i], ...] = min(ref[indices[i], ...], updates[i, ...])
//
//     # High rank indices (for each i, ..., j)
//     ref[indices[i, ..., j], ...] = min(ref[indices[i, ..., j], ...], updates[i, ..., j, ...])
//
// Duplicate entries are handled correctly: if multiple `indices` reference
// the same location, their contributions are combined.
//
// Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.
//
// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
// <img style="width:100%" src='https://www.tensorflow.org/images/ScatterAdd.png' alt>
// </div>
//
// Arguments:
//	resource: Should be from a `Variable` node.
//	indices: A tensor of indices into the first dimension of `ref`.
//	updates: A tensor of values to reduce into `ref` using the `min` operation.
//
// Returns the created operation.
func ResourceScatterMin(scope *Scope, resource tf.Output, indices tf.Output, updates tf.Output) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "ResourceScatterMin",
		Input: []tf.Input{
			resource, indices, updates,
		},
	}
	return scope.AddOperation(opspec)
}

// Elementwise computes the bitwise OR of `x` and `y`.
//
// The result will have those bits set that are set in `x`, `y` or both. The
// computation is performed on the underlying representations of `x` and `y`.
func BitwiseOr(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "BitwiseOr",
		Input: []tf.Input{
			x, y,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

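// A tiny sketch (editorial addition):
//
// ```go
// s := op.NewScope()
// x := op.Const(s.SubScope("x"), []int32{5, 12}) // 0101, 1100
// y := op.Const(s.SubScope("y"), []int32{3, 10}) // 0011, 1010
// z := op.BitwiseOr(s, x, y)
// // Fetching `z` yields [7 14], i.e. 0111 and 1110.
// ```
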
// MatrixSolveLsAttr is an optional argument to MatrixSolveLs.
type MatrixSolveLsAttr func(optionalAttr)

// MatrixSolveLsFast sets the optional fast attribute to value.
// If not specified, defaults to true
func MatrixSolveLsFast(value bool) MatrixSolveLsAttr {
	return func(m optionalAttr) {
		m["fast"] = value
	}
}

// Solves one or more linear least-squares problems.
//
// `matrix` is a tensor of shape `[..., M, N]` whose inner-most 2 dimensions
// form real or complex matrices of size `[M, N]`. `Rhs` is a tensor of the same
// type as `matrix` and shape `[..., M, K]`.
// The output is a tensor of shape `[..., N, K]` where each output matrix solves
// each of the equations
// `matrix[..., :, :]` * `output[..., :, :]` = `rhs[..., :, :]`
// in the least squares sense.
//
// We use the following notation for (complex) matrix and right-hand sides
// in the batch:
//
// `matrix`=\\(A \in \mathbb{C}^{m \times n}\\),
// `rhs`=\\(B  \in \mathbb{C}^{m \times k}\\),
// `output`=\\(X  \in \mathbb{C}^{n \times k}\\),
// `l2_regularizer`=\\(\lambda \in \mathbb{R}\\).
//
// If `fast` is `True`, then the solution is computed by solving the normal
// equations using Cholesky decomposition. Specifically, if \\(m \ge n\\) then
// \\(X = (A^H A + \lambda I)^{-1} A^H B\\), which solves the least-squares
// problem \\(X = \mathrm{argmin}_{Z \in \Re^{n \times k} } ||A Z - B||_F^2 + \lambda ||Z||_F^2\\).
// If \\(m \lt n\\) then `output` is computed as
// \\(X = A^H (A A^H + \lambda I)^{-1} B\\), which (for \\(\lambda = 0\\)) is the
// minimum-norm solution to the under-determined linear system, i.e.
// \\(X = \mathrm{argmin}_{Z \in \mathbb{C}^{n \times k} } ||Z||_F^2 \\),
// subject to \\(A Z = B\\). Notice that the fast path is only numerically stable
// when \\(A\\) is numerically full rank and has a condition number
// \\(\mathrm{cond}(A) \lt \frac{1}{\sqrt{\epsilon_{mach} } }\\) or \\(\lambda\\) is
// sufficiently large.
//
// If `fast` is `False` an algorithm based on the numerically robust complete
// orthogonal decomposition is used. This computes the minimum-norm
// least-squares solution, even when \\(A\\) is rank deficient. This path is
// typically 6-7 times slower than the fast path. If `fast` is `False` then
// `l2_regularizer` is ignored.
//
// Arguments:
//	matrix: Shape is `[..., M, N]`.
//	rhs: Shape is `[..., M, K]`.
//	l2_regularizer: Scalar tensor.
//
// @compatibility(numpy)
// Equivalent to np.linalg.lstsq
// @end_compatibility
//
// Returns Shape is `[..., N, K]`.
func MatrixSolveLs(scope *Scope, matrix tf.Output, rhs tf.Output, l2_regularizer tf.Output, optional ...MatrixSolveLsAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "MatrixSolveLs",
		Input: []tf.Input{
			matrix, rhs, l2_regularizer,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

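// A small overdetermined example (editorial addition): solve a 3x2 system in
// the least-squares sense. Note that `l2_regularizer` is a float64 scalar.
//
// ```go
// s := op.NewScope()
// a := op.Const(s.SubScope("A"), [][]float32{{1, 0}, {0, 1}, {1, 1}})
// b := op.Const(s.SubScope("b"), [][]float32{{1}, {1}, {0}})
// l2 := op.Const(s.SubScope("l2"), float64(0))
// x := op.MatrixSolveLs(s, a, b, l2, op.MatrixSolveLsFast(true))
// // `x` has shape [2, 1] and minimizes ||A x - b||_F^2.
// ```
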
// Interleave the values from the `data` tensors into a single tensor.
//
// Builds a merged tensor such that
//
// ```python
//     merged[indices[m][i, ..., j], ...] = data[m][i, ..., j, ...]
// ```
//
// For example, if each `indices[m]` is scalar or vector, we have
//
// ```python
//     # Scalar indices:
//     merged[indices[m], ...] = data[m][...]
//
//     # Vector indices:
//     merged[indices[m][i], ...] = data[m][i, ...]
// ```
//
// Each `data[i].shape` must start with the corresponding `indices[i].shape`,
// and the rest of `data[i].shape` must be constant w.r.t. `i`.  That is, we
// must have `data[i].shape = indices[i].shape + constant`.  In terms of this
// `constant`, the output shape is
//
//     merged.shape = [max(indices)] + constant
//
// Values are merged in order, so if an index appears in both `indices[m][i]` and
// `indices[n][j]` for `(m,i) < (n,j)` the slice `data[n][j]` will appear in the
// merged result. If you do not need this guarantee, ParallelDynamicStitch might
// perform better on some devices.
//
// For example:
//
// ```python
//     indices[0] = 6
//     indices[1] = [4, 1]
//     indices[2] = [[5, 2], [0, 3]]
//     data[0] = [61, 62]
//     data[1] = [[41, 42], [11, 12]]
//     data[2] = [[[51, 52], [21, 22]], [[1, 2], [31, 32]]]
//     merged = [[1, 2], [11, 12], [21, 22], [31, 32], [41, 42],
//               [51, 52], [61, 62]]
// ```
//
// This method can be used to merge partitions created by `dynamic_partition`
// as illustrated in the following example:
//
// ```python
//     # Apply function (increments x_i) on elements for which a certain condition
//     # applies (x_i != -1 in this example).
//     x = tf.constant([0.1, -1., 5.2, 4.3, -1., 7.4])
//     condition_mask = tf.not_equal(x, tf.constant(-1.))
//     partitioned_data = tf.dynamic_partition(
//         x, tf.cast(condition_mask, tf.int32), 2)
//     partitioned_data[1] = partitioned_data[1] + 1.0
//     condition_indices = tf.dynamic_partition(
//         tf.range(tf.shape(x)[0]), tf.cast(condition_mask, tf.int32), 2)
//     x = tf.dynamic_stitch(condition_indices, partitioned_data)
//     # Here x=[1.1, -1., 6.2, 5.3, -1, 8.4], the -1. values remain
//     # unchanged.
// ```
//
// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
// <img style="width:100%" src="https://www.tensorflow.org/images/DynamicStitch.png" alt>
// </div>
func DynamicStitch(scope *Scope, indices []tf.Output, data []tf.Output) (merged tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "DynamicStitch",
		Input: []tf.Input{
			tf.OutputList(indices), tf.OutputList(data),
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

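// A Go version of the first example above (an editorial addition):
//
// ```go
// s := op.NewScope()
// indices := []tf.Output{
// 	op.Const(s.SubScope("i0"), int32(6)),
// 	op.Const(s.SubScope("i1"), []int32{4, 1}),
// 	op.Const(s.SubScope("i2"), [][]int32{{5, 2}, {0, 3}}),
// }
// data := []tf.Output{
// 	op.Const(s.SubScope("d0"), []int32{61, 62}),
// 	op.Const(s.SubScope("d1"), [][]int32{{41, 42}, {11, 12}}),
// 	op.Const(s.SubScope("d2"),
// 		[][][]int32{{{51, 52}, {21, 22}}, {{1, 2}, {31, 32}}}),
// }
// merged := op.DynamicStitch(s, indices, data)
// // Fetching `merged` yields
// // [[1 2] [11 12] [21 22] [31 32] [41 42] [51 52] [61 62]].
// ```
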
// Performs padding as a preprocessing step during a convolution.
//
// Similar to FusedResizeAndPadConv2D, this op allows for an optimized
// implementation where the spatial padding transformation stage is fused with the
// im2col lookup, but in this case without the bilinear filtering required for
// resizing. Fusing the padding prevents the need to write out the intermediate
// results as whole tensors, reducing memory pressure, and we can get some latency
// gains by merging the transformation calculations.
// The data_format attribute for Conv2D isn't supported by this op, and 'NHWC'
// order is used instead.
// Internally this op uses a single per-graph scratch buffer, which means that it
// will block if multiple versions are being run in parallel. This is because this
// operator is primarily an optimization to minimize memory usage.
//
// Arguments:
//	input: 4-D with shape `[batch, in_height, in_width, in_channels]`.
//	paddings: A two-column matrix specifying the padding sizes. The number of
// rows must be the same as the rank of `input`.
//	filter: 4-D with shape
// `[filter_height, filter_width, in_channels, out_channels]`.
//
//	strides: 1-D of length 4.  The stride of the sliding window for each dimension
// of `input`. Must be in the same order as the dimension specified with format.
//	padding: The type of padding algorithm to use.
func FusedPadConv2D(scope *Scope, input tf.Output, paddings tf.Output, filter tf.Output, mode string, strides []int64, padding string) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"mode": mode, "strides": strides, "padding": padding}
	opspec := tf.OpSpec{
		Type: "FusedPadConv2D",
		Input: []tf.Input{
			input, paddings, filter,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Conv2DBackpropInputAttr is an optional argument to Conv2DBackpropInput.
type Conv2DBackpropInputAttr func(optionalAttr)

// Conv2DBackpropInputUseCudnnOnGpu sets the optional use_cudnn_on_gpu attribute to value.
// If not specified, defaults to true
func Conv2DBackpropInputUseCudnnOnGpu(value bool) Conv2DBackpropInputAttr {
	return func(m optionalAttr) {
		m["use_cudnn_on_gpu"] = value
	}
}

// Conv2DBackpropInputExplicitPaddings sets the optional explicit_paddings attribute to value.
//
// value: If `padding` is `"EXPLICIT"`, the list of explicit padding amounts. For the ith
// dimension, the amount of padding inserted before and after the dimension is
// `explicit_paddings[2 * i]` and `explicit_paddings[2 * i + 1]`, respectively. If
// `padding` is not `"EXPLICIT"`, `explicit_paddings` must be empty.
// If not specified, defaults to []
func Conv2DBackpropInputExplicitPaddings(value []int64) Conv2DBackpropInputAttr {
	return func(m optionalAttr) {
		m["explicit_paddings"] = value
	}
}

// Conv2DBackpropInputDataFormat sets the optional data_format attribute to value.
//
// value: Specify the data format of the input and output data. With the
// default format "NHWC", the data is stored in the order of:
//     [batch, in_height, in_width, in_channels].
// Alternatively, the format could be "NCHW", the data storage order of:
//     [batch, in_channels, in_height, in_width].
// If not specified, defaults to "NHWC"
func Conv2DBackpropInputDataFormat(value string) Conv2DBackpropInputAttr {
	return func(m optionalAttr) {
		m["data_format"] = value
	}
}

// Conv2DBackpropInputDilations sets the optional dilations attribute to value.
//
// value: 1-D tensor of length 4.  The dilation factor for each dimension of
// `input`. If set to k > 1, there will be k-1 skipped cells between each filter
// element on that dimension. The dimension order is determined by the value of
// `data_format`, see above for details. Dilations in the batch and depth
// dimensions must be 1.
// If not specified, defaults to [1, 1, 1, 1]
func Conv2DBackpropInputDilations(value []int64) Conv2DBackpropInputAttr {
	return func(m optionalAttr) {
		m["dilations"] = value
	}
}

// Computes the gradients of convolution with respect to the input.
//
// Arguments:
//	input_sizes: An integer vector representing the shape of `input`,
// where `input` is a 4-D `[batch, height, width, channels]` tensor.
//	filter: 4-D with shape
// `[filter_height, filter_width, in_channels, out_channels]`.
//	out_backprop: 4-D with shape `[batch, out_height, out_width, out_channels]`.
// Gradients w.r.t. the output of the convolution.
//	strides: The stride of the sliding window for each dimension of the input
// of the convolution. Must be in the same order as the dimension specified with
// format.
//	padding: The type of padding algorithm to use.
//
// Returns 4-D with shape `[batch, in_height, in_width, in_channels]`.  Gradient
// w.r.t. the input of the convolution.
func Conv2DBackpropInput(scope *Scope, input_sizes tf.Output, filter tf.Output, out_backprop tf.Output, strides []int64, padding string, optional ...Conv2DBackpropInputAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"strides": strides, "padding": padding}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "Conv2DBackpropInput",
		Input: []tf.Input{
			input_sizes, filter, out_backprop,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Creates a dataset that executes a SQL query and emits rows of the result set.
//
// Arguments:
//	driver_name: The database type. Currently, the only supported type is 'sqlite'.
//	data_source_name: A connection string to connect to the database.
//	query: A SQL query to execute.
//
//
func ExperimentalSqlDataset(scope *Scope, driver_name tf.Output, data_source_name tf.Output, query tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
	opspec := tf.OpSpec{
		Type: "ExperimentalSqlDataset",
		Input: []tf.Input{
			driver_name, data_source_name, query,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// LoadTPUEmbeddingCenteredRMSPropParametersAttr is an optional argument to LoadTPUEmbeddingCenteredRMSPropParameters.
type LoadTPUEmbeddingCenteredRMSPropParametersAttr func(optionalAttr)

// LoadTPUEmbeddingCenteredRMSPropParametersTableId sets the optional table_id attribute to value.
// If not specified, defaults to -1
//
// REQUIRES: value >= -1
func LoadTPUEmbeddingCenteredRMSPropParametersTableId(value int64) LoadTPUEmbeddingCenteredRMSPropParametersAttr {
	return func(m optionalAttr) {
		m["table_id"] = value
	}
}

// LoadTPUEmbeddingCenteredRMSPropParametersTableName sets the optional table_name attribute to value.
// If not specified, defaults to ""
func LoadTPUEmbeddingCenteredRMSPropParametersTableName(value string) LoadTPUEmbeddingCenteredRMSPropParametersAttr {
	return func(m optionalAttr) {
		m["table_name"] = value
	}
}

// Load centered RMSProp embedding parameters.
//
// An op that loads optimization parameters into HBM for embedding. Must be
// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
// embedding table configuration. For example, this op is used to install
// parameters that are loaded from a checkpoint before a training loop is
// executed.
//
// Arguments:
//	parameters: Value of parameters used in the centered RMSProp optimization algorithm.
//	ms: Value of ms used in the centered RMSProp optimization algorithm.
//	mom: Value of mom used in the centered RMSProp optimization algorithm.
//	mg: Value of mg used in the centered RMSProp optimization algorithm.
//
//
//
// Returns the created operation.
func LoadTPUEmbeddingCenteredRMSPropParameters(scope *Scope, parameters tf.Output, ms tf.Output, mom tf.Output, mg tf.Output, num_shards int64, shard_id int64, optional ...LoadTPUEmbeddingCenteredRMSPropParametersAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "LoadTPUEmbeddingCenteredRMSPropParameters",
		Input: []tf.Input{
			parameters, ms, mom, mg,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}

// DataFormatVecPermuteAttr is an optional argument to DataFormatVecPermute.
type DataFormatVecPermuteAttr func(optionalAttr)

// DataFormatVecPermuteSrcFormat sets the optional src_format attribute to value.
//
// value: source data format.
// If not specified, defaults to "NHWC"
func DataFormatVecPermuteSrcFormat(value string) DataFormatVecPermuteAttr {
	return func(m optionalAttr) {
		m["src_format"] = value
	}
}

// DataFormatVecPermuteDstFormat sets the optional dst_format attribute to value.
//
// value: destination data format.
// If not specified, defaults to "NCHW"
func DataFormatVecPermuteDstFormat(value string) DataFormatVecPermuteAttr {
	return func(m optionalAttr) {
		m["dst_format"] = value
	}
}

// Returns the permuted vector/tensor in the destination data format given the
// one in the source data format.
//
// Arguments:
//	x: Vector of size 4 or Tensor of shape (4, 2) in source data format.
//
// Returns Vector of size 4 or Tensor of shape (4, 2) in destination data format.
func DataFormatVecPermute(scope *Scope, x tf.Output, optional ...DataFormatVecPermuteAttr) (y tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "DataFormatVecPermute",
		Input: []tf.Input{
			x,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

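// For example (an editorial sketch), permuting an NHWC-ordered size vector
// into NCHW order using the attribute defaults:
//
// ```go
// s := op.NewScope()
// nhwc := op.Const(s.SubScope("x"), []int32{32, 224, 224, 3}) // [N, H, W, C]
// nchw := op.DataFormatVecPermute(s, nhwc)
// // Fetching `nchw` yields [32 3 224 224].
// ```
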
// Returns x / y element-wise.
//
// *NOTE*: `Div` supports broadcasting. More about broadcasting
// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
func Div(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Div",
		Input: []tf.Input{
			x, y,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// CudnnRNNCanonicalToParamsAttr is an optional argument to CudnnRNNCanonicalToParams.
type CudnnRNNCanonicalToParamsAttr func(optionalAttr)

// CudnnRNNCanonicalToParamsRnnMode sets the optional rnn_mode attribute to value.
// If not specified, defaults to "lstm"
func CudnnRNNCanonicalToParamsRnnMode(value string) CudnnRNNCanonicalToParamsAttr {
	return func(m optionalAttr) {
		m["rnn_mode"] = value
	}
}

// CudnnRNNCanonicalToParamsInputMode sets the optional input_mode attribute to value.
// If not specified, defaults to "linear_input"
func CudnnRNNCanonicalToParamsInputMode(value string) CudnnRNNCanonicalToParamsAttr {
	return func(m optionalAttr) {
		m["input_mode"] = value
	}
}

// CudnnRNNCanonicalToParamsDirection sets the optional direction attribute to value.
// If not specified, defaults to "unidirectional"
func CudnnRNNCanonicalToParamsDirection(value string) CudnnRNNCanonicalToParamsAttr {
	return func(m optionalAttr) {
		m["direction"] = value
	}
}

// CudnnRNNCanonicalToParamsDropout sets the optional dropout attribute to value.
// If not specified, defaults to 0
func CudnnRNNCanonicalToParamsDropout(value float32) CudnnRNNCanonicalToParamsAttr {
	return func(m optionalAttr) {
		m["dropout"] = value
	}
}

// CudnnRNNCanonicalToParamsSeed sets the optional seed attribute to value.
// If not specified, defaults to 0
func CudnnRNNCanonicalToParamsSeed(value int64) CudnnRNNCanonicalToParamsAttr {
	return func(m optionalAttr) {
		m["seed"] = value
	}
}

// CudnnRNNCanonicalToParamsSeed2 sets the optional seed2 attribute to value.
// If not specified, defaults to 0
func CudnnRNNCanonicalToParamsSeed2(value int64) CudnnRNNCanonicalToParamsAttr {
	return func(m optionalAttr) {
		m["seed2"] = value
	}
}

// Converts CudnnRNN params from canonical form to usable form.
//
// Writes a set of weights into the opaque params buffer so they can be used in
// upcoming training or inference.
//
// Note that the params buffer may not be compatible across different GPUs. So any
// save and restoration should be converted to and from the canonical weights and
// biases.
//
// num_layers: Specifies the number of layers in the RNN model.
// num_units: Specifies the size of the hidden state.
// input_size: Specifies the size of the input state.
// weights: the canonical form of weights that can be used for saving
//     and restoration. They are more likely to be compatible across different
//     generations.
// biases: the canonical form of biases that can be used for saving
//     and restoration. They are more likely to be compatible across different
//     generations.
// num_params: number of parameter sets for all layers.
//     Each layer may contain multiple parameter sets, with each set consisting of
//     a weight matrix and a bias vector.
// rnn_mode: Indicates the type of the RNN model.
// input_mode: Indicates whether there is a linear projection between the input and
//     the actual computation before the first layer. 'skip_input' is only allowed
//     when input_size == num_units; 'auto_select' implies 'skip_input' when
//     input_size == num_units; otherwise, it implies 'linear_input'.
// direction: Indicates whether a bidirectional model will be used.
//     dir = (direction == bidirectional) ? 2 : 1
// dropout: dropout probability. When set to 0., dropout is disabled.
// seed: the 1st part of a seed to initialize dropout.
// seed2: the 2nd part of a seed to initialize dropout.
func CudnnRNNCanonicalToParams(scope *Scope, num_layers tf.Output, num_units tf.Output, input_size tf.Output, weights []tf.Output, biases []tf.Output, optional ...CudnnRNNCanonicalToParamsAttr) (params tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "CudnnRNNCanonicalToParams",
		Input: []tf.Input{
			num_layers, num_units, input_size, tf.OutputList(weights), tf.OutputList(biases),
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Creates a dataset containing the elements of the first component of `input_dataset` whose last component is true.
func FilterByLastComponentDataset(scope *Scope, input_dataset tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
	opspec := tf.OpSpec{
		Type: "FilterByLastComponentDataset",
		Input: []tf.Input{
			input_dataset,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Computes the absolute value of a tensor.
//
// Given a tensor `x`, this operation returns a tensor containing the absolute
// value of each element in `x`. For example, if x is an input element and y is
// an output element, this operation computes \\(y = |x|\\).
func Abs(scope *Scope, x tf.Output) (y tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Abs",
		Input: []tf.Input{
			x,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// MaxPoolGradV2Attr is an optional argument to MaxPoolGradV2.
type MaxPoolGradV2Attr func(optionalAttr)

// MaxPoolGradV2DataFormat sets the optional data_format attribute to value.
//
// value: Specify the data format of the input and output data. With the
// default format "NHWC", the data is stored in the order of:
//     [batch, in_height, in_width, in_channels].
// Alternatively, the format could be "NCHW", the data storage order of:
//     [batch, in_channels, in_height, in_width].
// If not specified, defaults to "NHWC"
func MaxPoolGradV2DataFormat(value string) MaxPoolGradV2Attr {
	return func(m optionalAttr) {
		m["data_format"] = value
	}
}

// Computes gradients of the maxpooling function.
//
// Arguments:
//	orig_input: The original input tensor.
//	orig_output: The original output tensor.
//	grad: 4-D.  Gradients w.r.t. the output of `max_pool`.
//	ksize: The size of the window for each dimension of the input tensor.
//	strides: The stride of the sliding window for each dimension of the
// input tensor.
//	padding: The type of padding algorithm to use.
//
// Returns Gradients w.r.t. the input to `max_pool`.
func MaxPoolGradV2(scope *Scope, orig_input tf.Output, orig_output tf.Output, grad tf.Output, ksize tf.Output, strides tf.Output, padding string, optional ...MaxPoolGradV2Attr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"padding": padding}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "MaxPoolGradV2",
		Input: []tf.Input{
			orig_input, orig_output, grad, ksize, strides,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Restore a reader to a previously saved state.
//
// Not all Readers support being restored, so this can produce an
// Unimplemented error.
//
// Arguments:
//	reader_handle: Handle to a Reader.
//	state: Result of a ReaderSerializeState of a Reader with type
// matching reader_handle.
//
// Returns the created operation.
func ReaderRestoreStateV2(scope *Scope, reader_handle tf.Output, state tf.Output) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "ReaderRestoreStateV2",
		Input: []tf.Input{
			reader_handle, state,
		},
	}
	return scope.AddOperation(opspec)
}

// Inverse fast Fourier transform.
//
// Computes the inverse 1-dimensional discrete Fourier transform over the
// inner-most dimension of `input`.
//
// Arguments:
//	input: A complex tensor.
//
// Returns A complex tensor of the same shape as `input`. The inner-most
//   dimension of `input` is replaced with its inverse 1D Fourier transform.
//
// @compatibility(numpy)
// Equivalent to np.fft.ifft
// @end_compatibility
func IFFT(scope *Scope, input tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "IFFT",
		Input: []tf.Input{
			input,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// 2D fast Fourier transform.
//
// Computes the 2-dimensional discrete Fourier transform over the inner-most
// 2 dimensions of `input`.
//
// Arguments:
//	input: A complex tensor.
//
// Returns A complex tensor of the same shape as `input`. The inner-most 2
//   dimensions of `input` are replaced with their 2D Fourier transform.
//
// @compatibility(numpy)
// Equivalent to np.fft.fft2
// @end_compatibility
func FFT2D(scope *Scope, input tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "FFT2D",
		Input: []tf.Input{
			input,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Inverse 2D fast Fourier transform.
//
// Computes the inverse 2-dimensional discrete Fourier transform over the
// inner-most 2 dimensions of `input`.
//
// Arguments:
//	input: A complex tensor.
//
// Returns A complex tensor of the same shape as `input`. The inner-most 2
//   dimensions of `input` are replaced with their inverse 2D Fourier transform.
//
// @compatibility(numpy)
// Equivalent to np.fft.ifft2
// @end_compatibility
func IFFT2D(scope *Scope, input tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "IFFT2D",
		Input: []tf.Input{
			input,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

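// A round-trip sketch (editorial addition): FFT2D followed by IFFT2D recovers
// the input up to floating-point error. `op.Complex` is this package's
// wrapper for building a complex64 tensor from two float32 tensors.
//
// ```go
// s := op.NewScope()
// re := op.Const(s.SubScope("re"), [][]float32{{1, 2}, {3, 4}})
// im := op.Const(s.SubScope("im"), [][]float32{{0, 0}, {0, 0}})
// x := op.Complex(s, re, im)
// y := op.IFFT2D(s, op.FFT2D(s, x))
// // Fetching `y` yields approximately the original complex matrix.
// ```
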
// Inverse 3D real-valued fast Fourier transform.
//
// Computes the inverse 3-dimensional discrete Fourier transform of a real-valued
// signal over the inner-most 3 dimensions of `input`.
//
// The inner-most 3 dimensions of `input` are assumed to be the result of `RFFT3D`:
// The inner-most dimension contains the `fft_length / 2 + 1` unique components of
// the DFT of a real-valued signal. If `fft_length` is not provided, it is computed
// from the size of the inner-most 3 dimensions of `input`. If the FFT length used
// to compute `input` is odd, it should be provided since it cannot be inferred
// properly.
//
// Along each axis `IRFFT3D` is computed on, if `fft_length` (or
// `fft_length / 2 + 1` for the inner-most dimension) is smaller than the
// corresponding dimension of `input`, the dimension is cropped. If it is larger,
// the dimension is padded with zeros.
//
// Arguments:
//	input: A complex64 tensor.
//	fft_length: An int32 tensor of shape [3]. The FFT length for each dimension.
//
// Returns A float32 tensor of the same rank as `input`. The inner-most 3
//   dimensions of `input` are replaced with the `fft_length` samples of their
//   inverse 3D real Fourier transform.
//
// @compatibility(numpy)
// Equivalent to np.irfftn with 3 dimensions.
// @end_compatibility
func IRFFT3D(scope *Scope, input tf.Output, fft_length tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "IRFFT3D",
		Input: []tf.Input{
			input, fft_length,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Returns the truth value of (x != y) element-wise.
//
// *NOTE*: `NotEqual` supports broadcasting. More about broadcasting
// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
func NotEqual(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "NotEqual",
		Input: []tf.Input{
			x, y,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// LoadTPUEmbeddingMomentumParametersGradAccumDebugAttr is an optional argument to LoadTPUEmbeddingMomentumParametersGradAccumDebug.
type LoadTPUEmbeddingMomentumParametersGradAccumDebugAttr func(optionalAttr)

// LoadTPUEmbeddingMomentumParametersGradAccumDebugTableId sets the optional table_id attribute to value.
// If not specified, defaults to -1
//
// REQUIRES: value >= -1
func LoadTPUEmbeddingMomentumParametersGradAccumDebugTableId(value int64) LoadTPUEmbeddingMomentumParametersGradAccumDebugAttr {
	return func(m optionalAttr) {
		m["table_id"] = value
	}
}

// LoadTPUEmbeddingMomentumParametersGradAccumDebugTableName sets the optional table_name attribute to value.
// If not specified, defaults to ""
func LoadTPUEmbeddingMomentumParametersGradAccumDebugTableName(value string) LoadTPUEmbeddingMomentumParametersGradAccumDebugAttr {
	return func(m optionalAttr) {
		m["table_name"] = value
	}
}

// Load Momentum embedding parameters with debug support.
//
// An op that loads optimization parameters into HBM for embedding. Must be
// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
// embedding table configuration. For example, this op is used to install
// parameters that are loaded from a checkpoint before a training loop is
// executed.
//
// Arguments:
//	parameters: Value of parameters used in the Momentum optimization algorithm.
//	momenta: Value of momenta used in the Momentum optimization algorithm.
//	gradient_accumulators: Value of gradient_accumulators used in the Momentum optimization algorithm.
//
//
//
// Returns the created operation.
func LoadTPUEmbeddingMomentumParametersGradAccumDebug(scope *Scope, parameters tf.Output, momenta tf.Output, gradient_accumulators tf.Output, num_shards int64, shard_id int64, optional ...LoadTPUEmbeddingMomentumParametersGradAccumDebugAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "LoadTPUEmbeddingMomentumParametersGradAccumDebug",
		Input: []tf.Input{
			parameters, momenta, gradient_accumulators,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}

// StatefulStandardNormalAttr is an optional argument to StatefulStandardNormal.
type StatefulStandardNormalAttr func(optionalAttr)

// StatefulStandardNormalDtype sets the optional dtype attribute to value.
//
// value: The type of the output.
// If not specified, defaults to DT_FLOAT
func StatefulStandardNormalDtype(value tf.DataType) StatefulStandardNormalAttr {
	return func(m optionalAttr) {
		m["dtype"] = value
	}
}

// Outputs random values from a normal distribution.
//
// The generated values will have mean 0 and standard deviation 1.
//
// Arguments:
//	resource: The handle of the resource variable that stores the state of the RNG.
//	shape: The shape of the output tensor.
//
// Returns A tensor of the specified shape filled with random normal values.
func StatefulStandardNormal(scope *Scope, resource tf.Output, shape tf.Output, optional ...StatefulStandardNormalAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "StatefulStandardNormal",
		Input: []tf.Input{
			resource, shape,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Computes the Gauss error function of `x` element-wise.
func Erf(scope *Scope, x tf.Output) (y tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Erf",
		Input: []tf.Input{
			x,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Returns element-wise largest integer not greater than x.
func Floor(scope *Scope, x tf.Output) (y tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Floor",
		Input: []tf.Input{
			x,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Returns the number of records this Reader has produced.
//
// This is the same as the number of ReaderRead executions that have
// succeeded.
//
// Arguments:
//	reader_handle: Handle to a Reader.
func ReaderNumRecordsProducedV2(scope *Scope, reader_handle tf.Output) (records_produced tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "ReaderNumRecordsProducedV2",
		Input: []tf.Input{
			reader_handle,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// TensorListConcatAttr is an optional argument to TensorListConcat.
type TensorListConcatAttr func(optionalAttr)

// TensorListConcatElementShape sets the optional element_shape attribute to value.
// If not specified, defaults to an unknown shape (unknown rank)
func TensorListConcatElementShape(value tf.Shape) TensorListConcatAttr {
	return func(m optionalAttr) {
		m["element_shape"] = value
	}
}

// Concatenates all tensors in the list along the 0th dimension.
//
// Requires that all tensors have the same shape except the first dimension.
//
// input_handle: The input list.
// tensor: The concatenated result.
// lengths: Output tensor containing sizes of the 0th dimension of tensors in the list, used for computing the gradient.
//
func TensorListConcat(scope *Scope, input_handle tf.Output, element_dtype tf.DataType, optional ...TensorListConcatAttr) (tensor tf.Output, lengths tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"element_dtype": element_dtype}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "TensorListConcat",
		Input: []tf.Input{
			input_handle,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1)
}

// Elementwise computes the bitwise AND of `x` and `y`.
//
// The result will have those bits set that are set in both `x` and `y`. The
// computation is performed on the underlying representations of `x` and `y`.
func BitwiseAnd(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "BitwiseAnd",
		Input: []tf.Input{
			x, y,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

29320// ResizeAreaAttr is an optional argument to ResizeArea.
29321type ResizeAreaAttr func(optionalAttr)
29322
29323// ResizeAreaAlignCorners sets the optional align_corners attribute to value.
29324//
29325// value: If true, the centers of the 4 corner pixels of the input and output tensors are
29326// aligned, preserving the values at the corner pixels. Defaults to false.
29327// If not specified, defaults to false
29328func ResizeAreaAlignCorners(value bool) ResizeAreaAttr {
29329	return func(m optionalAttr) {
29330		m["align_corners"] = value
29331	}
29332}
29333
29334// Resize `images` to `size` using area interpolation.
29335//
29336// Input images can be of different types but output images are always float.
29337//
29338// The range of pixel values for the output image might be slightly different
29339// from the range for the input image because of limited numerical precision.
29340// To guarantee an output range, for example `[0.0, 1.0]`, apply
29341// `tf.clip_by_value` to the output.
29342//
29343// Each output pixel is computed by first transforming the pixel's footprint into
29344// the input tensor and then averaging the pixels that intersect the footprint. An
29345// input pixel's contribution to the average is weighted by the fraction of its
29346// area that intersects the footprint.  This is the same as OpenCV's INTER_AREA.
29347//
29348// Arguments:
29349//	images: 4-D with shape `[batch, height, width, channels]`.
29350//	size: A 1-D int32 Tensor of 2 elements: `new_height, new_width`.  The
29351// new size for the images.
29352//
29353// Returns 4-D with shape
29354// `[batch, new_height, new_width, channels]`.
29355func ResizeArea(scope *Scope, images tf.Output, size tf.Output, optional ...ResizeAreaAttr) (resized_images tf.Output) {
29356	if scope.Err() != nil {
29357		return
29358	}
29359	attrs := map[string]interface{}{}
29360	for _, a := range optional {
29361		a(attrs)
29362	}
29363	opspec := tf.OpSpec{
29364		Type: "ResizeArea",
29365		Input: []tf.Input{
29366			images, size,
29367		},
29368		Attrs: attrs,
29369	}
29370	op := scope.AddOperation(opspec)
29371	return op.Output(0)
29372}
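
// Example (editor's sketch): resize with the optional align_corners
// attribute, then clip to a guaranteed range as the note above suggests.
// It assumes the generated ClipByValue wrapper defined elsewhere in this
// package; session boilerplate as in the Floor example.
//
// ``` go
// s := op.NewScope()
// images := op.Placeholder(s, tf.Float) // [batch, height, width, channels]
// size := op.Const(s, []int32{224, 224})
// resized := op.ResizeArea(s, images, size, op.ResizeAreaAlignCorners(true))
// lo := op.Const(s, float32(0))
// hi := op.Const(s, float32(1))
// clipped := op.ClipByValue(s, resized, lo, hi)
// _ = clipped
// ```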
29373
29374// Sends `input` to all devices that are connected to the output.
29375//
29378// The graph should be constructed so that all ops connected to the output have a
29379// valid device assignment, and the op itself is assigned one of these devices.
29380//
29381// input: The input to the broadcast.
29382// output: The same as input.
29383// shape: The shape of the input tensor.
29384//
29385func NcclBroadcast(scope *Scope, input tf.Output, shape tf.Shape) (output tf.Output) {
29386	if scope.Err() != nil {
29387		return
29388	}
29389	attrs := map[string]interface{}{"shape": shape}
29390	opspec := tf.OpSpec{
29391		Type: "NcclBroadcast",
29392		Input: []tf.Input{
29393			input,
29394		},
29395		Attrs: attrs,
29396	}
29397	op := scope.AddOperation(opspec)
29398	return op.Output(0)
29399}
29400
29401// Computes the gradient of morphological 2-D dilation with respect to the filter.
29402//
29403// Arguments:
29404//	input: 4-D with shape `[batch, in_height, in_width, depth]`.
29405//	filter: 3-D with shape `[filter_height, filter_width, depth]`.
29406//	out_backprop: 4-D with shape `[batch, out_height, out_width, depth]`.
29407//	strides: 1-D of length 4. The stride of the sliding window for each dimension of
29408// the input tensor. Must be: `[1, stride_height, stride_width, 1]`.
29409//	rates: 1-D of length 4. The input stride for atrous morphological dilation.
29410// Must be: `[1, rate_height, rate_width, 1]`.
29411//	padding: The type of padding algorithm to use.
29412//
29413// Returns 3-D with shape `[filter_height, filter_width, depth]`.
29414func Dilation2DBackpropFilter(scope *Scope, input tf.Output, filter tf.Output, out_backprop tf.Output, strides []int64, rates []int64, padding string) (filter_backprop tf.Output) {
29415	if scope.Err() != nil {
29416		return
29417	}
29418	attrs := map[string]interface{}{"strides": strides, "rates": rates, "padding": padding}
29419	opspec := tf.OpSpec{
29420		Type: "Dilation2DBackpropFilter",
29421		Input: []tf.Input{
29422			input, filter, out_backprop,
29423		},
29424		Attrs: attrs,
29425	}
29426	op := scope.AddOperation(opspec)
29427	return op.Output(0)
29428}
29429
29430// AddSparseToTensorsMapAttr is an optional argument to AddSparseToTensorsMap.
29431type AddSparseToTensorsMapAttr func(optionalAttr)
29432
29433// AddSparseToTensorsMapContainer sets the optional container attribute to value.
29434//
29435// value: The container name for the `SparseTensorsMap` created by this op.
29436// If not specified, defaults to ""
29437func AddSparseToTensorsMapContainer(value string) AddSparseToTensorsMapAttr {
29438	return func(m optionalAttr) {
29439		m["container"] = value
29440	}
29441}
29442
29443// AddSparseToTensorsMapSharedName sets the optional shared_name attribute to value.
29444//
29445// value: The shared name for the `SparseTensorsMap` created by this op.
29446// If blank, the new Operation's unique name is used.
29447// If not specified, defaults to ""
29448func AddSparseToTensorsMapSharedName(value string) AddSparseToTensorsMapAttr {
29449	return func(m optionalAttr) {
29450		m["shared_name"] = value
29451	}
29452}
29453
29454// Add a `SparseTensor` to a `SparseTensorsMap` and return its handle.
29455//
29456// A `SparseTensor` is represented by three tensors: `sparse_indices`,
29457// `sparse_values`, and `sparse_shape`.
29458//
29459// This operator takes the given `SparseTensor` and adds it to a container
29460// object (a `SparseTensorsMap`).  A unique key within this container is generated
29461// in the form of an `int64`, and this is the value that is returned.
29462//
29463// The `SparseTensor` can then be read out as part of a minibatch by passing
29464// the key as a vector element to `TakeManySparseFromTensorsMap`.  To ensure
29465// the correct `SparseTensorsMap` is accessed, ensure that the same
29466// `container` and `shared_name` are passed to that Op.  If no `shared_name`
29467// is provided here, instead use the *name* of the Operation created by calling
29468// `AddSparseToTensorsMap` as the `shared_name` passed to
29469// `TakeManySparseFromTensorsMap`.  Ensure the Operations are colocated.
29470//
29471// Arguments:
29472//	sparse_indices: 2-D.  The `indices` of the `SparseTensor`.
29473//	sparse_values: 1-D.  The `values` of the `SparseTensor`.
29474//	sparse_shape: 1-D.  The `shape` of the `SparseTensor`.
29475//
29476// Returns 0-D.  The handle of the `SparseTensor` now stored in the
29477// `SparseTensorsMap`.
29478func AddSparseToTensorsMap(scope *Scope, sparse_indices tf.Output, sparse_values tf.Output, sparse_shape tf.Output, optional ...AddSparseToTensorsMapAttr) (sparse_handle tf.Output) {
29479	if scope.Err() != nil {
29480		return
29481	}
29482	attrs := map[string]interface{}{}
29483	for _, a := range optional {
29484		a(attrs)
29485	}
29486	opspec := tf.OpSpec{
29487		Type: "AddSparseToTensorsMap",
29488		Input: []tf.Input{
29489			sparse_indices, sparse_values, sparse_shape,
29490		},
29491		Attrs: attrs,
29492	}
29493	op := scope.AddOperation(opspec)
29494	return op.Output(0)
29495}
29496
29497// Returns a list which has the passed-in `Tensor` as its last element and the other elements of the given list in `input_handle`.
29498//
29499// tensor: The tensor to put on the list.
29500// input_handle: The old list.
29501// output_handle: A list with the elements of the old list followed by tensor.
29502// element_dtype: the type of elements in the list.
29503// element_shape: a shape compatible with that of elements in the list.
29504func TensorListPushBack(scope *Scope, input_handle tf.Output, tensor tf.Output) (output_handle tf.Output) {
29505	if scope.Err() != nil {
29506		return
29507	}
29508	opspec := tf.OpSpec{
29509		Type: "TensorListPushBack",
29510		Input: []tf.Input{
29511			input_handle, tensor,
29512		},
29513	}
29514	op := scope.AddOperation(opspec)
29515	return op.Output(0)
29516}
29517
29518// EnterAttr is an optional argument to Enter.
29519type EnterAttr func(optionalAttr)
29520
29521// EnterIsConstant sets the optional is_constant attribute to value.
29522//
29523// value: If true, the output is constant within the child frame.
29524// If not specified, defaults to false
29525func EnterIsConstant(value bool) EnterAttr {
29526	return func(m optionalAttr) {
29527		m["is_constant"] = value
29528	}
29529}
29530
29531// EnterParallelIterations sets the optional parallel_iterations attribute to value.
29532//
29533// value: The number of iterations allowed to run in parallel.
29534// If not specified, defaults to 10
29535func EnterParallelIterations(value int64) EnterAttr {
29536	return func(m optionalAttr) {
29537		m["parallel_iterations"] = value
29538	}
29539}
29540
29541// Creates or finds a child frame, and makes `data` available to the child frame.
29542//
29543// This op is used together with `Exit` to create loops in the graph.
29544// The unique `frame_name` is used by the `Executor` to identify frames. If
29545// `is_constant` is true, `output` is a constant in the child frame; otherwise
29546// it may be changed in the child frame. At most `parallel_iterations` iterations
29547// are run in parallel in the child frame.
29548//
29549// Arguments:
29550//	data: The tensor to be made available to the child frame.
29551//	frame_name: The name of the child frame.
29552//
29553// Returns The same tensor as `data`.
29554func Enter(scope *Scope, data tf.Output, frame_name string, optional ...EnterAttr) (output tf.Output) {
29555	if scope.Err() != nil {
29556		return
29557	}
29558	attrs := map[string]interface{}{"frame_name": frame_name}
29559	for _, a := range optional {
29560		a(attrs)
29561	}
29562	opspec := tf.OpSpec{
29563		Type: "Enter",
29564		Input: []tf.Input{
29565			data,
29566		},
29567		Attrs: attrs,
29568	}
29569	op := scope.AddOperation(opspec)
29570	return op.Output(0)
29571}
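
// Example (editor's sketch): the generated *Attr helpers are functional
// options, passed variadically after the required arguments:
//
// ``` go
// s := op.NewScope()
// data := op.Const(s, int32(0))
// out := op.Enter(s, data, "while_ctx",
// 	op.EnterIsConstant(true),
// 	op.EnterParallelIterations(32))
// _ = out // a complete loop also needs Merge, Switch, NextIteration and Exit
// ```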
29572
29573// TryRpcAttr is an optional argument to TryRpc.
29574type TryRpcAttr func(optionalAttr)
29575
29576// TryRpcProtocol sets the optional protocol attribute to value.
29577//
29578// value: RPC protocol to use.  Empty string means use the default protocol.
29579// Options include 'grpc'.
29580// If not specified, defaults to ""
29581func TryRpcProtocol(value string) TryRpcAttr {
29582	return func(m optionalAttr) {
29583		m["protocol"] = value
29584	}
29585}
29586
29587// TryRpcFailFast sets the optional fail_fast attribute to value.
29588//
29589// value: `boolean`. If `true` (default), then failures to connect
29590// (i.e., the server does not immediately respond) cause an RPC failure.
29591// If not specified, defaults to true
29592func TryRpcFailFast(value bool) TryRpcAttr {
29593	return func(m optionalAttr) {
29594		m["fail_fast"] = value
29595	}
29596}
29597
29598// TryRpcTimeoutInMs sets the optional timeout_in_ms attribute to value.
29599//
29600// value: `int`. If `0` (default), then the kernel will run the RPC
29601// request and only time out if the RPC deadline passes or the session times out.
29602// If this value is greater than `0`, then the op will raise an exception if
29603// the RPC takes longer than `timeout_in_ms`.
29604// If not specified, defaults to 0
29605func TryRpcTimeoutInMs(value int64) TryRpcAttr {
29606	return func(m optionalAttr) {
29607		m["timeout_in_ms"] = value
29608	}
29609}
29610
29611// Perform batches of RPC requests.
29612//
29613// This op asynchronously performs either a single RPC request, or a batch
29614// of requests.  RPC requests are defined by three main parameters:
29615//
29616//   - `address` (the host+port or BNS address of the request)
29617//   - `method` (the method name for the request)
29618//   - `request` (the serialized proto string, or vector of strings,
29619//      of the RPC request argument).
29620//
29621// For example, if you have an RPC service running at localhost:2345,
29622// and its interface is configured with the following proto declaration:
29623//
29624// ```
29625// service MyService {
29626//   rpc MyMethod(MyRequestProto) returns (MyResponseProto) {
29627//   }
29628// };
29629// ```
29630//
29631// then call this op with arguments:
29632//
29633// ```
29634// address = "localhost:2345"
29635// method = "MyService/MyMethod"
29636// ```
29637//
29638// The `request` tensor is a string tensor representing serialized `MyRequestProto`
29639// strings; and the output string tensor `response` will have the same shape
29640// and contain (upon successful completion) corresponding serialized
29641// `MyResponseProto` strings.
29642//
29643// For example, to send a single, empty, `MyRequestProto`, call
29644// this op with `request = ""`.  To send 5 **parallel** empty requests,
29645// call this op with `request = ["", "", "", "", ""]`.
29646//
29647// More generally, one can create a batch of `MyRequestProto` serialized protos
29648// from regular batched tensors using the `encode_proto` op, and convert
29649// the response `MyResponseProto` serialized protos to batched tensors
29650// using the `decode_proto` op.
29651//
29652// **NOTE** Working with serialized proto strings is faster than instantiating
29653// actual proto objects in memory, so no performance degradation is expected
29654// compared to writing custom kernels for this workflow.
29655//
29656// Unlike the standard `Rpc` op, if the connection fails or the remote worker
29657// returns an error status, this op does **not** reraise the exception.
29658// Instead, the `status_code` and `status_message` entries for the corresponding RPC
29659// call are set with the error returned from the RPC call.  The `response` tensor
29660// will contain valid response values for those minibatch entries whose RPCs did
29661// not fail; the rest of the entries will have empty strings.
29662//
29663// Arguments:
29664//	address: `0-D` or `1-D`.  The address (i.e. host_name:port) of the RPC server.
29665// If this tensor has more than 1 element, then multiple parallel rpc requests
29666// are sent.  This argument broadcasts with `method` and `request`.
29667//	method: `0-D` or `1-D`.  The method address on the RPC server.
29668// If this tensor has more than 1 element, then multiple parallel rpc requests
29669// are sent.  This argument broadcasts with `address` and `request`.
29670//	request: `0-D` or `1-D`.  Serialized proto strings: the rpc request argument.
29671// If this tensor has more than 1 element, then multiple parallel rpc requests
29672// are sent.  This argument broadcasts with `address` and `method`.
29673//
29674// Returns `response` (same shape as `request`; serialized proto strings: the rpc responses), `status_code` (same shape as `request`; values correspond to tensorflow Status enum codes), and `status_message` (same shape as `request`; values correspond to Status messages
29675// returned from the RPC calls).
29676func TryRpc(scope *Scope, address tf.Output, method tf.Output, request tf.Output, optional ...TryRpcAttr) (response tf.Output, status_code tf.Output, status_message tf.Output) {
29677	if scope.Err() != nil {
29678		return
29679	}
29680	attrs := map[string]interface{}{}
29681	for _, a := range optional {
29682		a(attrs)
29683	}
29684	opspec := tf.OpSpec{
29685		Type: "TryRpc",
29686		Input: []tf.Input{
29687			address, method, request,
29688		},
29689		Attrs: attrs,
29690	}
29691	op := scope.AddOperation(opspec)
29692	return op.Output(0), op.Output(1), op.Output(2)
29693}
29694
29695// Add all input tensors element-wise.
29696//
29697// Arguments:
29698//	inputs: Must all be the same size and shape.
29699func AddN(scope *Scope, inputs []tf.Output) (sum tf.Output) {
29700	if scope.Err() != nil {
29701		return
29702	}
29703	opspec := tf.OpSpec{
29704		Type: "AddN",
29705		Input: []tf.Input{
29706			tf.OutputList(inputs),
29707		},
29708	}
29709	op := scope.AddOperation(opspec)
29710	return op.Output(0)
29711}
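
// Example (editor's sketch): AddN takes a Go slice of outputs, which the
// generated body above packs with tf.OutputList:
//
// ``` go
// s := op.NewScope()
// a := op.Const(s, []float32{1, 2})
// b := op.Const(s, []float32{10, 20})
// c := op.Const(s, []float32{100, 200})
// sum := op.AddN(s, []tf.Output{a, b, c}) // [111, 222]
// _ = sum
// ```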
29712
29713// Returns the element-wise sum of a list of tensors.
29714//
29715// `tf.accumulate_n_v2` performs the same operation as `tf.add_n`, but does not
29716// wait for all of its inputs to be ready before beginning to sum. This can
29717// save memory if inputs are ready at different times, since minimum temporary
29718// storage is proportional to the output size rather than the inputs' size.
29719//
29720// Unlike the original `accumulate_n`, `accumulate_n_v2` is differentiable.
29721//
29722// Returns a `Tensor` of same shape and type as the elements of `inputs`.
29723//
29724// Arguments:
29725//	inputs: A list of `Tensor` objects, each with same shape and type.
29726//	shape: Shape of elements of `inputs`.
29727func AccumulateNV2(scope *Scope, inputs []tf.Output, shape tf.Shape) (sum tf.Output) {
29728	if scope.Err() != nil {
29729		return
29730	}
29731	attrs := map[string]interface{}{"shape": shape}
29732	opspec := tf.OpSpec{
29733		Type: "AccumulateNV2",
29734		Input: []tf.Input{
29735			tf.OutputList(inputs),
29736		},
29737		Attrs: attrs,
29738	}
29739	op := scope.AddOperation(opspec)
29740	return op.Output(0)
29741}
29742
29743// Computes the reciprocal of x element-wise.
29744//
29745// I.e., \\(y = 1 / x\\).
29746func Inv(scope *Scope, x tf.Output) (y tf.Output) {
29747	if scope.Err() != nil {
29748		return
29749	}
29750	opspec := tf.OpSpec{
29751		Type: "Inv",
29752		Input: []tf.Input{
29753			x,
29754		},
29755	}
29756	op := scope.AddOperation(opspec)
29757	return op.Output(0)
29758}
29759
29760// Creates a dataset that batches input elements into a SparseTensor.
29761//
29762// Arguments:
29763//	input_dataset: A handle to an input dataset. Must have a single component.
29764//	batch_size: A scalar representing the number of elements to accumulate in a
29765// batch.
29766//	row_shape: A vector representing the dense shape of each row in the produced
29767// SparseTensor. The shape may be partially specified, using `-1` to indicate
29768// that a particular dimension should use the maximum size of all batch elements.
29769//
29770//
29771func ExperimentalDenseToSparseBatchDataset(scope *Scope, input_dataset tf.Output, batch_size tf.Output, row_shape tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
29772	if scope.Err() != nil {
29773		return
29774	}
29775	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
29776	opspec := tf.OpSpec{
29777		Type: "ExperimentalDenseToSparseBatchDataset",
29778		Input: []tf.Input{
29779			input_dataset, batch_size, row_shape,
29780		},
29781		Attrs: attrs,
29782	}
29783	op := scope.AddOperation(opspec)
29784	return op.Output(0)
29785}
29786
29787// Computes the reciprocal of x element-wise.
29788//
29789// I.e., \\(y = 1 / x\\).
29790func Reciprocal(scope *Scope, x tf.Output) (y tf.Output) {
29791	if scope.Err() != nil {
29792		return
29793	}
29794	opspec := tf.OpSpec{
29795		Type: "Reciprocal",
29796		Input: []tf.Input{
29797			x,
29798		},
29799	}
29800	op := scope.AddOperation(opspec)
29801	return op.Output(0)
29802}
29803
29804// Conv3DBackpropFilterAttr is an optional argument to Conv3DBackpropFilter.
29805type Conv3DBackpropFilterAttr func(optionalAttr)
29806
29807// Conv3DBackpropFilterDilations sets the optional dilations attribute to value.
29808// If not specified, defaults to <i:1 i:1 i:1 i:1 i:1 >
29809func Conv3DBackpropFilterDilations(value []int64) Conv3DBackpropFilterAttr {
29810	return func(m optionalAttr) {
29811		m["dilations"] = value
29812	}
29813}
29814
29815// Computes the gradients of 3-D convolution with respect to the filter.
29816//
29817// DEPRECATED at GraphDef version 10: Use Conv3DBackpropFilterV2
29818//
29819// Arguments:
29820//	input: Shape `[batch, depth, rows, cols, in_channels]`.
29821//	filter: Shape `[depth, rows, cols, in_channels, out_channels]`.
29822// `in_channels` must match between `input` and `filter`.
29823//	out_backprop: Backprop signal of shape `[batch, out_depth, out_rows, out_cols,
29824// out_channels]`.
29825//	strides: 1-D tensor of length 5. The stride of the sliding window for each
29826// dimension of `input`. Must have `strides[0] = strides[4] = 1`.
29827//	padding: The type of padding algorithm to use.
29828func Conv3DBackpropFilter(scope *Scope, input tf.Output, filter tf.Output, out_backprop tf.Output, strides []int64, padding string, optional ...Conv3DBackpropFilterAttr) (output tf.Output) {
29829	if scope.Err() != nil {
29830		return
29831	}
29832	attrs := map[string]interface{}{"strides": strides, "padding": padding}
29833	for _, a := range optional {
29834		a(attrs)
29835	}
29836	opspec := tf.OpSpec{
29837		Type: "Conv3DBackpropFilter",
29838		Input: []tf.Input{
29839			input, filter, out_backprop,
29840		},
29841		Attrs: attrs,
29842	}
29843	op := scope.AddOperation(opspec)
29844	return op.Output(0)
29845}
29846
29847// Computes square root of x element-wise.
29848//
29849// I.e., \\(y = \sqrt{x} = x^{1/2}\\).
29850func Sqrt(scope *Scope, x tf.Output) (y tf.Output) {
29851	if scope.Err() != nil {
29852		return
29853	}
29854	opspec := tf.OpSpec{
29855		Type: "Sqrt",
29856		Input: []tf.Input{
29857			x,
29858		},
29859	}
29860	op := scope.AddOperation(opspec)
29861	return op.Output(0)
29862}
29863
29864// Get the value of the tensor specified by its handle.
29865//
29866// Arguments:
29867//	handle: The handle for a tensor stored in the session state.
29868//	dtype: The type of the output value.
29869//
29870// Returns The tensor for the given handle.
29871func GetSessionTensor(scope *Scope, handle tf.Output, dtype tf.DataType) (value tf.Output) {
29872	if scope.Err() != nil {
29873		return
29874	}
29875	attrs := map[string]interface{}{"dtype": dtype}
29876	opspec := tf.OpSpec{
29877		Type: "GetSessionTensor",
29878		Input: []tf.Input{
29879			handle,
29880		},
29881		Attrs: attrs,
29882	}
29883	op := scope.AddOperation(opspec)
29884	return op.Output(0)
29885}
29886
29887// Computes the gradient for the sqrt of `x` wrt its input.
29888//
29889// Specifically, `grad = dy * 0.5 / y`, where `y = sqrt(x)`, and `dy`
29890// is the corresponding input gradient.
29891func SqrtGrad(scope *Scope, y tf.Output, dy tf.Output) (z tf.Output) {
29892	if scope.Err() != nil {
29893		return
29894	}
29895	opspec := tf.OpSpec{
29896		Type: "SqrtGrad",
29897		Input: []tf.Input{
29898			y, dy,
29899		},
29900	}
29901	op := scope.AddOperation(opspec)
29902	return op.Output(0)
29903}
29904
29905// MatrixInverseAttr is an optional argument to MatrixInverse.
29906type MatrixInverseAttr func(optionalAttr)
29907
29908// MatrixInverseAdjoint sets the optional adjoint attribute to value.
29909// If not specified, defaults to false
29910func MatrixInverseAdjoint(value bool) MatrixInverseAttr {
29911	return func(m optionalAttr) {
29912		m["adjoint"] = value
29913	}
29914}
29915
29916// Computes the inverse of one or more square invertible matrices or their
29917//
29918// adjoints (conjugate transposes).
29919//
29920// The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
29921// form square matrices. The output is a tensor of the same shape as the input
29922// containing the inverse for all input submatrices `[..., :, :]`.
29923//
29924// The op uses LU decomposition with partial pivoting to compute the inverses.
29925//
29926// If a matrix is not invertible there is no guarantee what the op does. It
29927// may detect the condition and raise an exception or it may simply return a
29928// garbage result.
29929//
29930// Arguments:
29931//	input: Shape is `[..., M, M]`.
29932//
29933// Returns Shape is `[..., M, M]`.
29934//
29935// @compatibility(numpy)
29936// Equivalent to np.linalg.inv
29937// @end_compatibility
29938func MatrixInverse(scope *Scope, input tf.Output, optional ...MatrixInverseAttr) (output tf.Output) {
29939	if scope.Err() != nil {
29940		return
29941	}
29942	attrs := map[string]interface{}{}
29943	for _, a := range optional {
29944		a(attrs)
29945	}
29946	opspec := tf.OpSpec{
29947		Type: "MatrixInverse",
29948		Input: []tf.Input{
29949			input,
29950		},
29951		Attrs: attrs,
29952	}
29953	op := scope.AddOperation(opspec)
29954	return op.Output(0)
29955}
29956
29957// Computes reciprocal of square root of x element-wise.
29958//
29959// I.e., \\(y = 1 / \sqrt{x}\\).
29960func Rsqrt(scope *Scope, x tf.Output) (y tf.Output) {
29961	if scope.Err() != nil {
29962		return
29963	}
29964	opspec := tf.OpSpec{
29965		Type: "Rsqrt",
29966		Input: []tf.Input{
29967			x,
29968		},
29969	}
29970	op := scope.AddOperation(opspec)
29971	return op.Output(0)
29972}
29973
29974// Rounds the values of a tensor to the nearest integer, element-wise.
29975//
29976// Rounds half to even.  Also known as banker's rounding. If you want to round
29977// according to the current system rounding mode, use std::rint.
29978func Round(scope *Scope, x tf.Output) (y tf.Output) {
29979	if scope.Err() != nil {
29980		return
29981	}
29982	opspec := tf.OpSpec{
29983		Type: "Round",
29984		Input: []tf.Input{
29985			x,
29986		},
29987	}
29988	op := scope.AddOperation(opspec)
29989	return op.Output(0)
29990}
29991
29992// Delete the TensorArray from its resource container.
29993//
29994// This enables the user to close and release the resource in the middle
29995// of a step/run.
29996//
29997// Arguments:
29998//	handle: The handle to a TensorArray (output of TensorArray or TensorArrayGrad).
29999//
30000// Returns the created operation.
30001func TensorArrayCloseV3(scope *Scope, handle tf.Output) (o *tf.Operation) {
30002	if scope.Err() != nil {
30003		return
30004	}
30005	opspec := tf.OpSpec{
30006		Type: "TensorArrayCloseV3",
30007		Input: []tf.Input{
30008			handle,
30009		},
30010	}
30011	return scope.AddOperation(opspec)
30012}
30013
30014// Computes exponential of x element-wise.  \\(y = e^x\\).
30015func Exp(scope *Scope, x tf.Output) (y tf.Output) {
30016	if scope.Err() != nil {
30017		return
30018	}
30019	opspec := tf.OpSpec{
30020		Type: "Exp",
30021		Input: []tf.Input{
30022			x,
30023		},
30024	}
30025	op := scope.AddOperation(opspec)
30026	return op.Output(0)
30027}
30028
30029// NthElementAttr is an optional argument to NthElement.
30030type NthElementAttr func(optionalAttr)
30031
30032// NthElementReverse sets the optional reverse attribute to value.
30033//
30034// value: When set to True, find the nth-largest value in the vector instead
30035// of the nth-smallest.
30036// If not specified, defaults to false
30037func NthElementReverse(value bool) NthElementAttr {
30038	return func(m optionalAttr) {
30039		m["reverse"] = value
30040	}
30041}
30042
30043// Finds values of the `n`-th order statistic for the last dimension.
30044//
30045// If the input is a vector (rank-1), finds the entry which is the nth-smallest
30046// value in the vector and outputs its value as a scalar tensor.
30047//
30048// For matrices (resp. higher-rank input), computes the entry which is the
30049// nth-smallest value in each row (resp. vector along the last dimension). Thus,
30050//
30051//     values.shape = input.shape[:-1]
30052//
30053// Arguments:
30054//	input: 1-D or higher with last dimension at least `n+1`.
30055//	n: 0-D. Position of sorted vector to select along the last dimension (along
30056// each row for matrices). Valid range of n is `[0, input.shape[-1])`
30057//
30058// Returns The `n`-th order statistic along each last dimensional slice.
30059func NthElement(scope *Scope, input tf.Output, n tf.Output, optional ...NthElementAttr) (values tf.Output) {
30060	if scope.Err() != nil {
30061		return
30062	}
30063	attrs := map[string]interface{}{}
30064	for _, a := range optional {
30065		a(attrs)
30066	}
30067	opspec := tf.OpSpec{
30068		Type: "NthElement",
30069		Input: []tf.Input{
30070			input, n,
30071		},
30072		Attrs: attrs,
30073	}
30074	op := scope.AddOperation(opspec)
30075	return op.Output(0)
30076}
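
// Example (editor's sketch): the 0th order statistic per row, smallest by
// default and largest with the reverse option:
//
// ``` go
// s := op.NewScope()
// input := op.Const(s, [][]float32{{5, 1, 4}, {9, 7, 8}})
// n := op.Const(s, int32(0))
// smallest := op.NthElement(s, input, n)                            // [1, 7]
// largest := op.NthElement(s, input, n, op.NthElementReverse(true)) // [5, 9]
// _, _ = smallest, largest
// ```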
30077
30078// Computes the maximum along segments of a tensor.
30079//
30080// Read
30081// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)
30082// for an explanation of segments.
30083//
30084// This operator is similar to the unsorted segment sum operator found
30085// [(here)](../../../api_docs/python/math_ops.md#UnsortedSegmentSum).
30086// Instead of computing the sum over segments, it computes the maximum such that:
30087//
30088// \\(output_i = \max_{j...} data[j...]\\) where max is over tuples `j...` such
30089// that `segment_ids[j...] == i`.
30090//
30091// If the maximum is empty for a given segment ID `i`, it outputs the smallest
30092// possible value for the specific numeric type,
30093// `output[i] = numeric_limits<T>::lowest()`.
30094//
30095// If the given segment ID `i` is negative, then the corresponding value is
30096// dropped, and will not be included in the result.
30097//
30098// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
30099// <img style="width:100%" src="https://www.tensorflow.org/images/UnsortedSegmentMax.png" alt>
30100// </div>
30101//
30102// For example:
30103//
30104// ``` python
30105// c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]])
30106// tf.unsorted_segment_max(c, tf.constant([0, 1, 0]), num_segments=2)
30107// # ==> [[ 4,  3, 3, 4],
30108// #       [5,  6, 7, 8]]
30109// ```
30110//
30111//
30112// Arguments:
30113//
30114//	segment_ids: A tensor whose shape is a prefix of `data.shape`.
30115//
30116//
30117// Returns Has same shape as data, except for the first `segment_ids.rank`
30118// dimensions, which are replaced with a single dimension which has size
30119// `num_segments`.
30120func UnsortedSegmentMax(scope *Scope, data tf.Output, segment_ids tf.Output, num_segments tf.Output) (output tf.Output) {
30121	if scope.Err() != nil {
30122		return
30123	}
30124	opspec := tf.OpSpec{
30125		Type: "UnsortedSegmentMax",
30126		Input: []tf.Input{
30127			data, segment_ids, num_segments,
30128		},
30129	}
30130	op := scope.AddOperation(opspec)
30131	return op.Output(0)
30132}
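
// Example (editor's sketch): the Python snippet above, expressed with this
// wrapper:
//
// ``` go
// s := op.NewScope()
// c := op.Const(s, [][]int32{{1, 2, 3, 4}, {5, 6, 7, 8}, {4, 3, 2, 1}})
// ids := op.Const(s, []int32{0, 1, 0})
// num := op.Const(s, int32(2))
// out := op.UnsortedSegmentMax(s, c, ids, num)
// // out ==> [[4, 3, 3, 4],
// //          [5, 6, 7, 8]]
// _ = out
// ```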
30133
30134// Computes softplus: `log(exp(features) + 1)`.
30135func Softplus(scope *Scope, features tf.Output) (activations tf.Output) {
30136	if scope.Err() != nil {
30137		return
30138	}
30139	opspec := tf.OpSpec{
30140		Type: "Softplus",
30141		Input: []tf.Input{
30142			features,
30143		},
30144	}
30145	op := scope.AddOperation(opspec)
30146	return op.Output(0)
30147}
30148
30149// Computes exponential of x - 1 element-wise.
30150//
30151// I.e., \\(y = (\exp x) - 1\\).
30152func Expm1(scope *Scope, x tf.Output) (y tf.Output) {
30153	if scope.Err() != nil {
30154		return
30155	}
30156	opspec := tf.OpSpec{
30157		Type: "Expm1",
30158		Input: []tf.Input{
30159			x,
30160		},
30161	}
30162	op := scope.AddOperation(opspec)
30163	return op.Output(0)
30164}
30165
30166// Computes natural logarithm of x element-wise.
30167//
30168// I.e., \\(y = \log_e x\\).
30169func Log(scope *Scope, x tf.Output) (y tf.Output) {
30170	if scope.Err() != nil {
30171		return
30172	}
30173	opspec := tf.OpSpec{
30174		Type: "Log",
30175		Input: []tf.Input{
30176			x,
30177		},
30178	}
30179	op := scope.AddOperation(opspec)
30180	return op.Output(0)
30181}
30182
30183// Returns the index of a data point that should be added to the seed set.
30184//
30185// Entries in distances are assumed to be squared distances of candidate points to
30186// the already sampled centers in the seed set. The op constructs one Markov chain
30187// of the k-MC^2 algorithm and returns the index of one candidate point to be added
30188// as an additional cluster center.
30189//
30190// Arguments:
30191//	distances: Vector with squared distances to the closest previously sampled cluster center
30192// for each candidate point.
30193//	seed: Scalar. Seed for initializing the random number generator.
30194//
30195// Returns Scalar with the index of the sampled point.
30196func KMC2ChainInitialization(scope *Scope, distances tf.Output, seed tf.Output) (index tf.Output) {
30197	if scope.Err() != nil {
30198		return
30199	}
30200	opspec := tf.OpSpec{
30201		Type: "KMC2ChainInitialization",
30202		Input: []tf.Input{
30203			distances, seed,
30204		},
30205	}
30206	op := scope.AddOperation(opspec)
30207	return op.Output(0)
30208}
30209
30210// Computes hyperbolic sine of x element-wise.
30211func Sinh(scope *Scope, x tf.Output) (y tf.Output) {
30212	if scope.Err() != nil {
30213		return
30214	}
30215	opspec := tf.OpSpec{
30216		Type: "Sinh",
30217		Input: []tf.Input{
30218			x,
30219		},
30220	}
30221	op := scope.AddOperation(opspec)
30222	return op.Output(0)
30223}
30224
30225// Computes the sum along sparse segments of a tensor.
30226//
30227// Read
30228// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)
30229// for an explanation of segments.
30230//
30231// Like `SegmentSum`, but `segment_ids` can have rank less than `data`'s first
30232// dimension, selecting a subset of dimension 0, specified by `indices`.
30233//
30234// For example:
30235//
30236// ```python
30237// c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])
30238//
30239// # Select two rows, one segment.
30240// tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 0]))
30241// # => [[0 0 0 0]]
30242//
30243// # Select two rows, two segments.
30244// tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 1]))
30245// # => [[ 1  2  3  4]
30246// #     [-1 -2 -3 -4]]
30247//
30248// # Select all rows, two segments.
30249// tf.sparse_segment_sum(c, tf.constant([0, 1, 2]), tf.constant([0, 0, 1]))
30250// # => [[0 0 0 0]
30251// #     [5 6 7 8]]
30252//
30253// # Which is equivalent to:
30254// tf.segment_sum(c, tf.constant([0, 0, 1]))
30255// ```
30256//
30257// Arguments:
30258//
30259//	indices: A 1-D tensor. Has same rank as `segment_ids`.
30260//	segment_ids: A 1-D tensor. Values should be sorted and can be repeated.
30261//
30262// Returns Has same shape as data, except for dimension 0 which
30263// has size `k`, the number of segments.
30264func SparseSegmentSum(scope *Scope, data tf.Output, indices tf.Output, segment_ids tf.Output) (output tf.Output) {
30265	if scope.Err() != nil {
30266		return
30267	}
30268	opspec := tf.OpSpec{
30269		Type: "SparseSegmentSum",
30270		Input: []tf.Input{
30271			data, indices, segment_ids,
30272		},
30273	}
30274	op := scope.AddOperation(opspec)
30275	return op.Output(0)
30276}
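
// Example (editor's sketch): the first Python case above ("two rows, one
// segment"), expressed with this wrapper:
//
// ``` go
// s := op.NewScope()
// c := op.Const(s, [][]int32{{1, 2, 3, 4}, {-1, -2, -3, -4}, {5, 6, 7, 8}})
// indices := op.Const(s, []int32{0, 1})
// segmentIDs := op.Const(s, []int32{0, 0})
// out := op.SparseSegmentSum(s, c, indices, segmentIDs) // [[0, 0, 0, 0]]
// _ = out
// ```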
30277
30278// CastAttr is an optional argument to Cast.
30279type CastAttr func(optionalAttr)
30280
30281// CastTruncate sets the optional Truncate attribute to value.
30282// If not specified, defaults to false
30283func CastTruncate(value bool) CastAttr {
30284	return func(m optionalAttr) {
30285		m["Truncate"] = value
30286	}
30287}
30288
30289// Cast x of type SrcT to y of DstT.
30290func Cast(scope *Scope, x tf.Output, DstT tf.DataType, optional ...CastAttr) (y tf.Output) {
30291	if scope.Err() != nil {
30292		return
30293	}
30294	attrs := map[string]interface{}{"DstT": DstT}
30295	for _, a := range optional {
30296		a(attrs)
30297	}
30298	opspec := tf.OpSpec{
30299		Type: "Cast",
30300		Input: []tf.Input{
30301			x,
30302		},
30303		Attrs: attrs,
30304	}
30305	op := scope.AddOperation(opspec)
30306	return op.Output(0)
30307}
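
// Example (editor's sketch): DstT is a required argument, while Truncate
// stays a functional option:
//
// ``` go
// s := op.NewScope()
// x := op.Const(s, []float32{1.9, -1.9})
// y := op.Cast(s, x, tf.Int32)                       // [1, -1]
// z := op.Cast(s, x, tf.Half, op.CastTruncate(true)) // truncating narrowing cast
// _, _ = y, z
// ```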
30308
30309// Computes the log of the absolute value of `Gamma(x)` element-wise.
30310func Lgamma(scope *Scope, x tf.Output) (y tf.Output) {
30311	if scope.Err() != nil {
30312		return
30313	}
30314	opspec := tf.OpSpec{
30315		Type: "Lgamma",
30316		Input: []tf.Input{
30317			x,
30318		},
30319	}
30320	op := scope.AddOperation(opspec)
30321	return op.Output(0)
30322}
30323
30324// UnicodeEncodeAttr is an optional argument to UnicodeEncode.
30325type UnicodeEncodeAttr func(optionalAttr)
30326
30327// UnicodeEncodeErrors sets the optional errors attribute to value.
30328//
30329// value: Error handling policy when there is invalid formatting found in the input.
30330// The value of 'strict' will cause the operation to produce an InvalidArgument
30331// error on any invalid input formatting. A value of 'replace' (the default) will
30332// cause the operation to replace any invalid formatting in the input with the
30333// `replacement_char` codepoint. A value of 'ignore' will cause the operation to
30334// skip any invalid formatting in the input and produce no corresponding output
30335// character.
30336// If not specified, defaults to "replace"
30337func UnicodeEncodeErrors(value string) UnicodeEncodeAttr {
30338	return func(m optionalAttr) {
30339		m["errors"] = value
30340	}
30341}
30342
30343// UnicodeEncodeReplacementChar sets the optional replacement_char attribute to value.
30344//
30345// value: The replacement character codepoint to be used in place of any invalid
30346// formatting in the input when `errors='replace'`. Any valid unicode codepoint may
30347// be used. The default value is the unicode replacement character,
30348// 0xFFFD (U+FFFD).
30349// If not specified, defaults to 65533
30350func UnicodeEncodeReplacementChar(value int64) UnicodeEncodeAttr {
30351	return func(m optionalAttr) {
30352		m["replacement_char"] = value
30353	}
30354}
30355
30356// Encode a tensor of ints into unicode strings.
30357//
30358// Returns a vector of strings, where `output[i]` is constructed by encoding the
30359// Unicode codepoints in `input_values[input_splits[i]:input_splits[i+1]]`
30360// using `output_encoding`.
30361//
30362// ---
30363//
30364// Example:
30365//
30366// ```
30367// input_values = [72, 101, 108, 108, 111, 87, 111, 114, 108, 100]
30368// input_splits = [0, 5, 10]
30369// output_encoding = 'UTF-8'
30370//
30371// output = ['Hello', 'World']
30372// ```
30373//
30374// Arguments:
30375//	input_values: A 1D tensor containing the unicode codepoints that should be encoded.
30376//	input_splits: A 1D tensor specifying how the unicode codepoints should be split into strings.
30377// In particular, `output[i]` is constructed by encoding the codepoints in the
30378// slice `input_values[input_splits[i]:input_splits[i+1]]`.
30379//	output_encoding: Unicode encoding of the output strings. Valid encodings are: `"UTF-8",
30380// "UTF-16-BE", and "UTF-32-BE"`.
30381//
30382// Returns The 1-D Tensor of strings encoded from the provided unicode codepoints.
30383func UnicodeEncode(scope *Scope, input_values tf.Output, input_splits tf.Output, output_encoding string, optional ...UnicodeEncodeAttr) (output tf.Output) {
30384	if scope.Err() != nil {
30385		return
30386	}
30387	attrs := map[string]interface{}{"output_encoding": output_encoding}
30388	for _, a := range optional {
30389		a(attrs)
30390	}
30391	opspec := tf.OpSpec{
30392		Type: "UnicodeEncode",
30393		Input: []tf.Input{
30394			input_values, input_splits,
30395		},
30396		Attrs: attrs,
30397	}
30398	op := scope.AddOperation(opspec)
30399	return op.Output(0)
30400}
30401
30402// Computes the complementary error function of `x` element-wise.
30403func Erfc(scope *Scope, x tf.Output) (y tf.Output) {
30404	if scope.Err() != nil {
30405		return
30406	}
30407	opspec := tf.OpSpec{
30408		Type: "Erfc",
30409		Input: []tf.Input{
30410			x,
30411		},
30412	}
30413	op := scope.AddOperation(opspec)
30414	return op.Output(0)
30415}
30416
30417// Computes sigmoid of `x` element-wise.
30418//
30419// Specifically, `y = 1 / (1 + exp(-x))`.
30420func Sigmoid(scope *Scope, x tf.Output) (y tf.Output) {
30421	if scope.Err() != nil {
30422		return
30423	}
30424	opspec := tf.OpSpec{
30425		Type: "Sigmoid",
30426		Input: []tf.Input{
30427			x,
30428		},
30429	}
30430	op := scope.AddOperation(opspec)
30431	return op.Output(0)
30432}
30433
30434// Computes sin of x element-wise.
30435func Sin(scope *Scope, x tf.Output) (y tf.Output) {
30436	if scope.Err() != nil {
30437		return
30438	}
30439	opspec := tf.OpSpec{
30440		Type: "Sin",
30441		Input: []tf.Input{
30442			x,
30443		},
30444	}
30445	op := scope.AddOperation(opspec)
30446	return op.Output(0)
30447}
30448
30449// FusedBatchNormGradAttr is an optional argument to FusedBatchNormGrad.
30450type FusedBatchNormGradAttr func(optionalAttr)
30451
30452// FusedBatchNormGradEpsilon sets the optional epsilon attribute to value.
30453//
30454// value: A small float number added to the variance of x.
30455// If not specified, defaults to 0.0001
30456func FusedBatchNormGradEpsilon(value float32) FusedBatchNormGradAttr {
30457	return func(m optionalAttr) {
30458		m["epsilon"] = value
30459	}
30460}
30461
30462// FusedBatchNormGradDataFormat sets the optional data_format attribute to value.
30463//
30464// value: The data format for y_backprop, x, x_backprop.
30465// Either "NHWC" (default) or "NCHW".
30466// If not specified, defaults to "NHWC"
30467func FusedBatchNormGradDataFormat(value string) FusedBatchNormGradAttr {
30468	return func(m optionalAttr) {
30469		m["data_format"] = value
30470	}
30471}
30472
30473// FusedBatchNormGradIsTraining sets the optional is_training attribute to value.
30474//
30475// value: A bool value to indicate the operation is for training (default)
30476// or inference.
30477// If not specified, defaults to true
30478func FusedBatchNormGradIsTraining(value bool) FusedBatchNormGradAttr {
30479	return func(m optionalAttr) {
30480		m["is_training"] = value
30481	}
30482}
30483
30484// Gradient for batch normalization.
30485//
30486// Note that the sizes of the 4D Tensors are defined by either "NHWC" or "NCHW".
30487// The size of 1D Tensors matches the dimension C of the 4D Tensors.
30488//
30489// Arguments:
30490//	y_backprop: A 4D Tensor for the gradient with respect to y.
30491//	x: A 4D Tensor for input data.
30492//	scale: A 1D Tensor for scaling factor, to scale the normalized x.
30493//	reserve_space_1: When is_training is True, a 1D Tensor for the computed batch
30494// mean to be reused in gradient computation. When is_training is
30495// False, a 1D Tensor for the population mean to be reused in both
30496// 1st and 2nd order gradient computation.
30497//	reserve_space_2: When is_training is True, a 1D Tensor for the computed batch
30498// variance (inverted variance in the cuDNN case) to be reused in
30499// gradient computation. When is_training is False, a 1D Tensor
30500// for the population variance to be reused in both 1st and 2nd
30501// order gradient computation.
30502//
30503// Returns a 4D Tensor for the gradient with respect to x, a 1D Tensor for the gradient with respect to scale, a 1D Tensor for the gradient with respect to offset, and two unused placeholders that match the mean and variance inputs
30504// in FusedBatchNorm.
30505func FusedBatchNormGrad(scope *Scope, y_backprop tf.Output, x tf.Output, scale tf.Output, reserve_space_1 tf.Output, reserve_space_2 tf.Output, optional ...FusedBatchNormGradAttr) (x_backprop tf.Output, scale_backprop tf.Output, offset_backprop tf.Output, reserve_space_3 tf.Output, reserve_space_4 tf.Output) {
30506	if scope.Err() != nil {
30507		return
30508	}
30509	attrs := map[string]interface{}{}
30510	for _, a := range optional {
30511		a(attrs)
30512	}
30513	opspec := tf.OpSpec{
30514		Type: "FusedBatchNormGrad",
30515		Input: []tf.Input{
30516			y_backprop, x, scale, reserve_space_1, reserve_space_2,
30517		},
30518		Attrs: attrs,
30519	}
30520	op := scope.AddOperation(opspec)
30521	return op.Output(0), op.Output(1), op.Output(2), op.Output(3), op.Output(4)
30522}
30523
30524// Computes cos of x element-wise.
30525func Cos(scope *Scope, x tf.Output) (y tf.Output) {
30526	if scope.Err() != nil {
30527		return
30528	}
30529	opspec := tf.OpSpec{
30530		Type: "Cos",
30531		Input: []tf.Input{
30532			x,
30533		},
30534	}
30535	op := scope.AddOperation(opspec)
30536	return op.Output(0)
30537}
30538
30539// Computes the determinant of one or more square matrices.
30540//
30541// The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
30542// form square matrices. The output is a tensor containing the determinants
30543// for all input submatrices `[..., :, :]`.
30544//
30545// Arguments:
30546//	input: Shape is `[..., M, M]`.
30547//
30548// Returns Shape is `[...]`.
30549func MatrixDeterminant(scope *Scope, input tf.Output) (output tf.Output) {
30550	if scope.Err() != nil {
30551		return
30552	}
30553	opspec := tf.OpSpec{
30554		Type: "MatrixDeterminant",
30555		Input: []tf.Input{
30556			input,
30557		},
30558	}
30559	op := scope.AddOperation(opspec)
30560	return op.Output(0)
30561}
30562
30563// Updates the tree ensemble by either adding a layer to the last tree being grown
30564//
30565// or by starting a new tree.
30566//
30567// Arguments:
30568//	tree_ensemble_handle: Handle to the ensemble variable.
30569//	feature_ids: Rank 1 tensor with ids for each feature. This is the real id of
30570// the feature that will be used in the split.
30571//	node_ids: List of rank 1 tensors representing the nodes for which this feature
30572// has a split.
30573//	gains: List of rank 1 tensors representing the gains for each of the feature's
30574// split.
30575//	thresholds: List of rank 1 tensors representing the thresholds for each of the
30576// feature's split.
30577//	left_node_contribs: List of rank 2 tensors with left leaf contribs for each of
30578// the feature's splits. Will be added to the previous node values to constitute
30579// the values of the left nodes.
30580//	right_node_contribs: List of rank 2 tensors with right leaf contribs for each
30581// of the feature's splits. Will be added to the previous node values to constitute
30582// the values of the right nodes.
30583//	max_depth: Max depth of the tree to build.
30584//	learning_rate: shrinkage constant for each new tree.
30585//	pruning_mode: 0-No pruning, 1-Pre-pruning, 2-Post-pruning.
30586//
30587// Returns the created operation.
30588func BoostedTreesUpdateEnsemble(scope *Scope, tree_ensemble_handle tf.Output, feature_ids tf.Output, node_ids []tf.Output, gains []tf.Output, thresholds []tf.Output, left_node_contribs []tf.Output, right_node_contribs []tf.Output, max_depth tf.Output, learning_rate tf.Output, pruning_mode int64) (o *tf.Operation) {
30589	if scope.Err() != nil {
30590		return
30591	}
30592	attrs := map[string]interface{}{"pruning_mode": pruning_mode}
30593	opspec := tf.OpSpec{
30594		Type: "BoostedTreesUpdateEnsemble",
30595		Input: []tf.Input{
30596			tree_ensemble_handle, feature_ids, tf.OutputList(node_ids), tf.OutputList(gains), tf.OutputList(thresholds), tf.OutputList(left_node_contribs), tf.OutputList(right_node_contribs), max_depth, learning_rate,
30597		},
30598		Attrs: attrs,
30599	}
30600	return scope.AddOperation(opspec)
30601}
30602
30603// Computes tan of x element-wise.
30604func Tan(scope *Scope, x tf.Output) (y tf.Output) {
30605	if scope.Err() != nil {
30606		return
30607	}
30608	opspec := tf.OpSpec{
30609		Type: "Tan",
30610		Input: []tf.Input{
30611			x,
30612		},
30613	}
30614	op := scope.AddOperation(opspec)
30615	return op.Output(0)
30616}
30617
30618// Creates a dataset that emits each dim-0 slice of `components` once.
30619func TensorSliceDataset(scope *Scope, components []tf.Output, output_shapes []tf.Shape) (handle tf.Output) {
30620	if scope.Err() != nil {
30621		return
30622	}
30623	attrs := map[string]interface{}{"output_shapes": output_shapes}
30624	opspec := tf.OpSpec{
30625		Type: "TensorSliceDataset",
30626		Input: []tf.Input{
30627			tf.OutputList(components),
30628		},
30629		Attrs: attrs,
30630	}
30631	op := scope.AddOperation(opspec)
30632	return op.Output(0)
30633}
30634
30635// Computes acos of x element-wise.
30636func Acos(scope *Scope, x tf.Output) (y tf.Output) {
30637	if scope.Err() != nil {
30638		return
30639	}
30640	opspec := tf.OpSpec{
30641		Type: "Acos",
30642		Input: []tf.Input{
30643			x,
30644		},
30645	}
30646	op := scope.AddOperation(opspec)
30647	return op.Output(0)
30648}
30649
30650// Computes the Bessel i0e function of `x` element-wise.
30651//
30652// Exponentially scaled modified Bessel function of order 0, defined as
30653// `bessel_i0e(x) = exp(-abs(x)) bessel_i0(x)`.
30654//
30655// This function is faster and more numerically stable than `bessel_i0(x)`.
30656func BesselI0e(scope *Scope, x tf.Output) (y tf.Output) {
30657	if scope.Err() != nil {
30658		return
30659	}
30660	opspec := tf.OpSpec{
30661		Type: "BesselI0e",
30662		Input: []tf.Input{
30663			x,
30664		},
30665	}
30666	op := scope.AddOperation(opspec)
30667	return op.Output(0)
30668}
30669
30670// Shuffle dimensions of x according to a permutation.
30671//
30672// The output `y` has the same rank as `x`. The shapes of `x` and `y` satisfy:
30673//   `y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]`
30674func Transpose(scope *Scope, x tf.Output, perm tf.Output) (y tf.Output) {
30675	if scope.Err() != nil {
30676		return
30677	}
30678	opspec := tf.OpSpec{
30679		Type: "Transpose",
30680		Input: []tf.Input{
30681			x, perm,
30682		},
30683	}
30684	op := scope.AddOperation(opspec)
30685	return op.Output(0)
30686}
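
// Example (editor's sketch): transposing a 2x3 matrix with an explicit
// permutation:
//
// ``` go
// s := op.NewScope()
// x := op.Const(s, [][]int32{{1, 2, 3}, {4, 5, 6}})
// perm := op.Const(s, []int32{1, 0})
// y := op.Transpose(s, x, perm)
// // y ==> [[1, 4],
// //        [2, 5],
// //        [3, 6]]
// _ = y
// ```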
30687
30688// MinAttr is an optional argument to Min.
30689type MinAttr func(optionalAttr)
30690
30691// MinKeepDims sets the optional keep_dims attribute to value.
30692//
30693// value: If true, retain reduced dimensions with length 1.
30694// If not specified, defaults to false
30695func MinKeepDims(value bool) MinAttr {
30696	return func(m optionalAttr) {
30697		m["keep_dims"] = value
30698	}
30699}
30700
30701// Computes the minimum of elements across dimensions of a tensor.
30702//
30703// Reduces `input` along the dimensions given in `axis`. Unless
30704// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
30705// `axis`. If `keep_dims` is true, the reduced dimensions are
30706// retained with length 1.
30707//
30708// Arguments:
30709//	input: The tensor to reduce.
30710//	axis: The dimensions to reduce. Must be in the range
30711// `[-rank(input), rank(input))`.
30712//
30713// Returns The reduced tensor.
30714func Min(scope *Scope, input tf.Output, axis tf.Output, optional ...MinAttr) (output tf.Output) {
30715	if scope.Err() != nil {
30716		return
30717	}
30718	attrs := map[string]interface{}{}
30719	for _, a := range optional {
30720		a(attrs)
30721	}
30722	opspec := tf.OpSpec{
30723		Type: "Min",
30724		Input: []tf.Input{
30725			input, axis,
30726		},
30727		Attrs: attrs,
30728	}
30729	op := scope.AddOperation(opspec)
30730	return op.Output(0)
30731}
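
// Example (editor's sketch): reducing over the last axis, with and without
// keep_dims:
//
// ``` go
// s := op.NewScope()
// x := op.Const(s, [][]float32{{3, 1, 2}, {6, 5, 4}})
// axis := op.Const(s, int32(1))
// m := op.Min(s, x, axis)                       // [1, 4], shape [2]
// k := op.Min(s, x, axis, op.MinKeepDims(true)) // [[1], [4]], shape [2, 1]
// _, _ = m, k
// ```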
30732
30733// Computes the Bessel i1e function of `x` element-wise.
30734//
30735// Exponentially scaled modified Bessel function of order 1, defined as
30736// `bessel_i1e(x) = exp(-abs(x)) bessel_i1(x)`.
30737//
30738// This function is faster and more numerically stable than `bessel_i1(x)`.
30739func BesselI1e(scope *Scope, x tf.Output) (y tf.Output) {
30740	if scope.Err() != nil {
30741		return
30742	}
30743	opspec := tf.OpSpec{
30744		Type: "BesselI1e",
30745		Input: []tf.Input{
30746			x,
30747		},
30748	}
30749	op := scope.AddOperation(opspec)
30750	return op.Output(0)
30751}
30752
30753// Returns an element-wise indication of the sign of a number.
30754//
30755// `y = sign(x) = -1` if `x < 0`; 0 if `x == 0`; 1 if `x > 0`.
30756//
30757// For complex numbers, `y = sign(x) = x / |x|` if `x != 0`, otherwise `y = 0`.
30758func Sign(scope *Scope, x tf.Output) (y tf.Output) {
30759	if scope.Err() != nil {
30760		return
30761	}
30762	opspec := tf.OpSpec{
30763		Type: "Sign",
30764		Input: []tf.Input{
30765			x,
30766		},
30767	}
30768	op := scope.AddOperation(opspec)
30769	return op.Output(0)
30770}
30771
30772// Creates a dataset that passes a sliding window over `input_dataset`.
30773//
30774// Arguments:
30775//
30776//	window_size: A scalar representing the number of elements in the
30777// sliding window.
30778//	window_shift: A scalar representing the steps moving the sliding window
30779// forward in one iteration. It must be positive.
30780//	window_stride: A scalar representing the stride of the input elements of the sliding window.
30781// It must be positive.
30782//
30783//
30784func ExperimentalSlidingWindowDataset(scope *Scope, input_dataset tf.Output, window_size tf.Output, window_shift tf.Output, window_stride tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
30785	if scope.Err() != nil {
30786		return
30787	}
30788	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
30789	opspec := tf.OpSpec{
30790		Type: "ExperimentalSlidingWindowDataset",
30791		Input: []tf.Input{
30792			input_dataset, window_size, window_shift, window_stride,
30793		},
30794		Attrs: attrs,
30795	}
30796	op := scope.AddOperation(opspec)
30797	return op.Output(0)
30798}
30799
30800// OrderedMapUnstageNoKeyAttr is an optional argument to OrderedMapUnstageNoKey.
30801type OrderedMapUnstageNoKeyAttr func(optionalAttr)
30802
30803// OrderedMapUnstageNoKeyCapacity sets the optional capacity attribute to value.
30804// If not specified, defaults to 0
30805//
30806// REQUIRES: value >= 0
30807func OrderedMapUnstageNoKeyCapacity(value int64) OrderedMapUnstageNoKeyAttr {
30808	return func(m optionalAttr) {
30809		m["capacity"] = value
30810	}
30811}
30812
30813// OrderedMapUnstageNoKeyMemoryLimit sets the optional memory_limit attribute to value.
30814// If not specified, defaults to 0
30815//
30816// REQUIRES: value >= 0
30817func OrderedMapUnstageNoKeyMemoryLimit(value int64) OrderedMapUnstageNoKeyAttr {
30818	return func(m optionalAttr) {
30819		m["memory_limit"] = value
30820	}
30821}
30822
30823// OrderedMapUnstageNoKeyContainer sets the optional container attribute to value.
30824// If not specified, defaults to ""
30825func OrderedMapUnstageNoKeyContainer(value string) OrderedMapUnstageNoKeyAttr {
30826	return func(m optionalAttr) {
30827		m["container"] = value
30828	}
30829}
30830
30831// OrderedMapUnstageNoKeySharedName sets the optional shared_name attribute to value.
30832// If not specified, defaults to ""
30833func OrderedMapUnstageNoKeySharedName(value string) OrderedMapUnstageNoKeyAttr {
30834	return func(m optionalAttr) {
30835		m["shared_name"] = value
30836	}
30837}
30838
30839// Op removes and returns the (key, value) element with the smallest
30840//
30841// key from the underlying container.   If the underlying container
30842// does not contain elements, the op will block until it does.
30843func OrderedMapUnstageNoKey(scope *Scope, indices tf.Output, dtypes []tf.DataType, optional ...OrderedMapUnstageNoKeyAttr) (key tf.Output, values []tf.Output) {
30844	if scope.Err() != nil {
30845		return
30846	}
30847	attrs := map[string]interface{}{"dtypes": dtypes}
30848	for _, a := range optional {
30849		a(attrs)
30850	}
30851	opspec := tf.OpSpec{
30852		Type: "OrderedMapUnstageNoKey",
30853		Input: []tf.Input{
30854			indices,
30855		},
30856		Attrs: attrs,
30857	}
30858	op := scope.AddOperation(opspec)
30859	if scope.Err() != nil {
30860		return
30861	}
30862	var idx int
30863	var err error
30864	key = op.Output(idx)
30865	if values, idx, err = makeOutputList(op, idx, "values"); err != nil {
30866		scope.UpdateErr("OrderedMapUnstageNoKey", err)
30867		return
30868	}
30869	return key, values
30870}
30871
30872// Returns element-wise integer closest to x.
30873//
30874// If the result is midway between two representable values,
30875// the even representable value is chosen.
30876// For example:
30877//
30878// ```
30879// rint(-1.5) ==> -2.0
30880// rint(0.5000001) ==> 1.0
30881// rint([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]) ==> [-2., -2., -0., 0., 2., 2., 2.]
30882// ```
30883func Rint(scope *Scope, x tf.Output) (y tf.Output) {
30884	if scope.Err() != nil {
30885		return
30886	}
30887	opspec := tf.OpSpec{
30888		Type: "Rint",
30889		Input: []tf.Input{
30890			x,
30891		},
30892	}
30893	op := scope.AddOperation(opspec)
30894	return op.Output(0)
30895}
30896
30897// Computes the derivative of a Gamma random sample w.r.t. `alpha`.
30898func RandomGammaGrad(scope *Scope, alpha tf.Output, sample tf.Output) (output tf.Output) {
30899	if scope.Err() != nil {
30900		return
30901	}
30902	opspec := tf.OpSpec{
30903		Type: "RandomGammaGrad",
30904		Input: []tf.Input{
30905			alpha, sample,
30906		},
30907	}
30908	op := scope.AddOperation(opspec)
30909	return op.Output(0)
30910}
30911
30912// Returns x + y element-wise.
30913//
30914// *NOTE*: `Add` supports broadcasting. `AddN` does not. More about broadcasting
30915// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
30916func Add(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
30917	if scope.Err() != nil {
30918		return
30919	}
30920	opspec := tf.OpSpec{
30921		Type: "Add",
30922		Input: []tf.Input{
30923			x, y,
30924		},
30925	}
30926	op := scope.AddOperation(opspec)
30927	return op.Output(0)
30928}
30929
30930// Returns x + y element-wise.
30931//
30932// *NOTE*: `Add` supports broadcasting. `AddN` does not. More about broadcasting
30933// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
30934func AddV2(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
30935	if scope.Err() != nil {
30936		return
30937	}
30938	opspec := tf.OpSpec{
30939		Type: "AddV2",
30940		Input: []tf.Input{
30941			x, y,
30942		},
30943	}
30944	op := scope.AddOperation(opspec)
30945	return op.Output(0)
30946}

// AllCandidateSamplerAttr is an optional argument to AllCandidateSampler.
type AllCandidateSamplerAttr func(optionalAttr)

// AllCandidateSamplerSeed sets the optional seed attribute to value.
//
// value: If either seed or seed2 is set to be non-zero, the random number
// generator is seeded by the given seed.  Otherwise, it is seeded by a
// random seed.
// If not specified, defaults to 0
func AllCandidateSamplerSeed(value int64) AllCandidateSamplerAttr {
	return func(m optionalAttr) {
		m["seed"] = value
	}
}

// AllCandidateSamplerSeed2 sets the optional seed2 attribute to value.
//
// value: A second seed to avoid seed collision.
// If not specified, defaults to 0
func AllCandidateSamplerSeed2(value int64) AllCandidateSamplerAttr {
	return func(m optionalAttr) {
		m["seed2"] = value
	}
}

// Generates labels for candidate sampling with a learned unigram distribution.
//
// See explanations of candidate sampling and the data formats at
// go/candidate-sampling.
//
// For each batch, this op picks a single set of sampled candidate labels.
//
// The advantages of sampling candidates per-batch are simplicity and the
// possibility of efficient dense matrix multiplication. The disadvantage is that
// the sampled candidates must be chosen independently of the context and of the
// true labels.
//
// Arguments:
//	true_classes: A batch_size * num_true matrix, in which each row contains the
// IDs of the num_true target_classes in the corresponding original label.
//	num_true: Number of true labels per context.
//	num_sampled: Number of candidates to produce.
//	unique: If unique is true, we sample with rejection, so that all sampled
// candidates in a batch are unique. This requires some approximation to
// estimate the post-rejection sampling probabilities.
//
// Returns:
//	sampled_candidates: A vector of length num_sampled, in which each element is
// the ID of a sampled candidate.
//	true_expected_count: A batch_size * num_true matrix, representing the number
// of times each candidate is expected to occur in a batch of sampled candidates.
// If unique=true, then this is a probability.
//	sampled_expected_count: A vector of length num_sampled, for each sampled
// candidate representing the number of times the candidate is expected to occur
// in a batch of sampled candidates.  If unique=true, then this is a probability.
func AllCandidateSampler(scope *Scope, true_classes tf.Output, num_true int64, num_sampled int64, unique bool, optional ...AllCandidateSamplerAttr) (sampled_candidates tf.Output, true_expected_count tf.Output, sampled_expected_count tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_true": num_true, "num_sampled": num_sampled, "unique": unique}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "AllCandidateSampler",
		Input: []tf.Input{
			true_classes,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}

// Returns element-wise remainder of division. When `x < 0` xor `y < 0` is
//
// true, this follows Python semantics in that the result here is consistent
// with a flooring divide. E.g. `floor(x / y) * y + mod(x, y) = x`.
//
// *NOTE*: `FloorMod` supports broadcasting. More about broadcasting
// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
func FloorMod(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "FloorMod",
		Input: []tf.Input{
			x, y,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Saves the input tensors to disk.
//
// The size of `tensor_names` must match the number of tensors in `data`. `data[i]`
// is written to `filename` with name `tensor_names[i]`.
//
// See also `SaveSlices`.
//
// Arguments:
//	filename: Must have a single element. The name of the file to which we write
// the tensor.
//	tensor_names: Shape `[N]`. The names of the tensors to be saved.
//	data: `N` tensors to save.
//
// Returns the created operation.
func Save(scope *Scope, filename tf.Output, tensor_names tf.Output, data []tf.Output) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Save",
		Input: []tf.Input{
			filename, tensor_names, tf.OutputList(data),
		},
	}
	return scope.AddOperation(opspec)
}

// Returns x * y element-wise. Returns zero if y is zero, even if x is infinite or NaN.
//
// *NOTE*: `Mul` supports broadcasting. More about broadcasting
// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
func MulNoNan(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "MulNoNan",
		Input: []tf.Input{
			x, y,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Returns x / y element-wise for integer types.
//
// Truncation designates that negative numbers will round fractional quantities
// toward zero. I.e. -7 / 5 = -1. This matches C semantics but it is different
// from Python semantics. See `FloorDiv` for a division function that matches
// Python semantics.
//
// *NOTE*: `TruncateDiv` supports broadcasting. More about broadcasting
// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
func TruncateDiv(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "TruncateDiv",
		Input: []tf.Input{
			x, y,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// RequantizePerChannelAttr is an optional argument to RequantizePerChannel.
type RequantizePerChannelAttr func(optionalAttr)

// RequantizePerChannelOutType sets the optional out_type attribute to value.
//
// value: The quantized type of output tensor that needs to be converted.
// If not specified, defaults to DT_QUINT8
func RequantizePerChannelOutType(value tf.DataType) RequantizePerChannelAttr {
	return func(m optionalAttr) {
		m["out_type"] = value
	}
}

// Requantizes input with min and max values known per channel.
//
// Arguments:
//	input: The original input tensor.
//	input_min: The minimum value of the input tensor.
//	input_max: The maximum value of the input tensor.
//	requested_output_min: The minimum value of the output tensor requested.
//	requested_output_max: The maximum value of the output tensor requested.
//
// Returns:
//	output: Output tensor.
//	output_min: The minimum value of the final output tensor.
//	output_max: The maximum value of the final output tensor.
func RequantizePerChannel(scope *Scope, input tf.Output, input_min tf.Output, input_max tf.Output, requested_output_min tf.Output, requested_output_max tf.Output, optional ...RequantizePerChannelAttr) (output tf.Output, output_min tf.Output, output_max tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "RequantizePerChannel",
		Input: []tf.Input{
			input, input_min, input_max, requested_output_min, requested_output_max,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}

// Restores tensors from a V2 checkpoint.
//
// For backward compatibility with the V1 format, this Op currently allows
// restoring from a V1 checkpoint as well:
//   - This Op first attempts to find the V2 index file pointed to by "prefix", and
//     if found, proceeds to read it as a V2 checkpoint;
//   - Otherwise the V1 read path is invoked.
// Relying on this behavior is not recommended, as the ability to fall back to read
// V1 might be deprecated and eventually removed.
//
// By default, restores the named tensors in full.  If the caller wishes to restore
// specific slices of stored tensors, "shape_and_slices" should be non-empty
// strings and correspondingly well-formed.
//
// Callers must ensure all the named tensors are indeed stored in the checkpoint.
//
// Arguments:
//	prefix: Must have a single element.  The prefix of a V2 checkpoint.
//	tensor_names: shape {N}.  The names of the tensors to be restored.
//	shape_and_slices: shape {N}.  The slice specs of the tensors to be restored.
// Empty strings indicate that they are non-partitioned tensors.
//	dtypes: shape {N}.  The list of expected dtype for the tensors.  Must match
// those stored in the checkpoint.
//
// Returns shape {N}.  The restored tensors, whose shapes are read from the
// checkpoint directly.
func RestoreV2(scope *Scope, prefix tf.Output, tensor_names tf.Output, shape_and_slices tf.Output, dtypes []tf.DataType) (tensors []tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"dtypes": dtypes}
	opspec := tf.OpSpec{
		Type: "RestoreV2",
		Input: []tf.Input{
			prefix, tensor_names, shape_and_slices,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	if scope.Err() != nil {
		return
	}
	var idx int
	var err error
	if tensors, idx, err = makeOutputList(op, idx, "tensors"); err != nil {
		scope.UpdateErr("RestoreV2", err)
		return
	}
	return tensors
}
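
// A hedged sketch of wiring Save and RestoreV2 together for a single tensor.
// It only constructs graph nodes; the checkpoint prefix "/tmp/ckpt" and the
// tensor name "weights" are hypothetical, and Const is assumed from this
// package. An empty slice spec means the tensor is not partitioned.
func exampleSaveRestore(s *Scope) []tf.Output {
	value := Const(s, []float32{1, 2, 3})
	prefix := Const(s, "/tmp/ckpt")
	names := Const(s, []string{"weights"})
	Save(s, prefix, names, []tf.Output{value})
	slices := Const(s, []string{""})
	return RestoreV2(s, prefix, names, slices, []tf.DataType{tf.Float})
}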

// FIFOQueueV2Attr is an optional argument to FIFOQueueV2.
type FIFOQueueV2Attr func(optionalAttr)

// FIFOQueueV2Shapes sets the optional shapes attribute to value.
//
// value: The shape of each component in a value. The length of this attr must
// be either 0 or the same as the length of component_types. If the length of
// this attr is 0, the shapes of queue elements are not constrained, and
// only one element may be dequeued at a time.
// If not specified, defaults to <>
//
// REQUIRES: len(value) >= 0
func FIFOQueueV2Shapes(value []tf.Shape) FIFOQueueV2Attr {
	return func(m optionalAttr) {
		m["shapes"] = value
	}
}

// FIFOQueueV2Capacity sets the optional capacity attribute to value.
//
// value: The upper bound on the number of elements in this queue.
// Negative numbers mean no limit.
// If not specified, defaults to -1
func FIFOQueueV2Capacity(value int64) FIFOQueueV2Attr {
	return func(m optionalAttr) {
		m["capacity"] = value
	}
}

// FIFOQueueV2Container sets the optional container attribute to value.
//
// value: If non-empty, this queue is placed in the given container.
// Otherwise, a default container is used.
// If not specified, defaults to ""
func FIFOQueueV2Container(value string) FIFOQueueV2Attr {
	return func(m optionalAttr) {
		m["container"] = value
	}
}

// FIFOQueueV2SharedName sets the optional shared_name attribute to value.
//
// value: If non-empty, this queue will be shared under the given name
// across multiple sessions.
// If not specified, defaults to ""
func FIFOQueueV2SharedName(value string) FIFOQueueV2Attr {
	return func(m optionalAttr) {
		m["shared_name"] = value
	}
}

// A queue that produces elements in first-in first-out order.
//
// Arguments:
//	component_types: The type of each component in a value.
//
// Returns The handle to the queue.
func FIFOQueueV2(scope *Scope, component_types []tf.DataType, optional ...FIFOQueueV2Attr) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"component_types": component_types}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "FIFOQueueV2",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Creates a dataset that contains the elements of `input_dataset` ignoring errors.
func ExperimentalIgnoreErrorsDataset(scope *Scope, input_dataset tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
	opspec := tf.OpSpec{
		Type: "ExperimentalIgnoreErrorsDataset",
		Input: []tf.Input{
			input_dataset,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Returns 0 if x == 0, and x / y otherwise, elementwise.
func Xdivy(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Xdivy",
		Input: []tf.Input{
			x, y,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Bucketizes 'input' based on 'boundaries'.
//
// For example, if the inputs are
//     boundaries = [0, 10, 100]
//     input = [[-5, 10000]
//              [150,   10]
//              [5,    100]]
//
// then the output will be
//     output = [[0, 3]
//               [3, 2]
//               [1, 3]]
//
// Arguments:
//	input: Tensor of any shape containing int or float values.
//	boundaries: A sorted list of floats giving the boundaries of the buckets.
//
// Returns Same shape as 'input', with each value of input replaced by its bucket index.
//
// @compatibility(numpy)
// Equivalent to np.digitize.
// @end_compatibility
func Bucketize(scope *Scope, input tf.Output, boundaries []float32) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"boundaries": boundaries}
	opspec := tf.OpSpec{
		Type: "Bucketize",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
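
// A minimal sketch mirroring the docstring example above. Const is assumed
// from this package; exampleBucketize is illustrative only.
func exampleBucketize(s *Scope) tf.Output {
	input := Const(s, [][]int32{{-5, 10000}, {150, 10}, {5, 100}})
	// With boundaries [0, 10, 100], the result is [[0, 3], [3, 2], [1, 3]].
	return Bucketize(s, input, []float32{0, 10, 100})
}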

// Calculates gains for each feature and returns the best possible split information for the feature.
//
// The split information is the best threshold (bucket id), gains and left/right node contributions per node for each feature.
//
// It is possible that not all nodes can be split on each feature. Hence, the list of possible nodes can differ between the features. Therefore, we return `node_ids_list` for each feature, containing the list of nodes that this feature can be used to split.
//
// In this manner, the output is the best split per feature and per node, so that it needs to be combined later to produce the best split for each node (among all possible features).
//
// The output lists are all of the same length, `num_features`.
// The output shapes are compatible in a way that the first dimension of all tensors of all lists is the same and equal to the number of possible split nodes for each feature.
//
// Arguments:
//	node_id_range: A Rank 1 tensor (shape=[2]) to specify the range [first, last) of node ids to process within `stats_summary_list`. The nodes are iterated between the two nodes specified by the tensor, as in `for node_id in range(node_id_range[0], node_id_range[1])` (Note that the last index node_id_range[1] is exclusive).
//	stats_summary_list: A list of Rank 3 tensors (#shape=[max_splits, bucket, 2]) for accumulated stats summary (gradient/hessian) per node per bucket for each feature. The first dimension of the tensor is the maximum number of splits, and thus not all elements of it will be used, but only the indexes specified by node_ids will be used.
//	l1: l1 regularization factor on leaf weights, per instance based.
//	l2: l2 regularization factor on leaf weights, per instance based.
//	tree_complexity: adjustment to the gain, per leaf based.
//	min_node_weight: minimum avg of hessians in a node required before the node is considered for splitting.
//	max_splits: the number of nodes that can be split in the whole tree. Used as a dimension of output tensors.
//
// Returns:
//	node_ids_list: An output list of Rank 1 tensors indicating possible split node ids for each feature. The length of the list is num_features, but each tensor has different size as each feature provides different possible nodes. See above for details like shapes and sizes.
//	gains_list: An output list of Rank 1 tensors indicating the best gains for each feature to split for certain nodes. See above for details like shapes and sizes.
//	thresholds_list: An output list of Rank 1 tensors indicating the bucket id to compare with (as a threshold) for split in each node. See above for details like shapes and sizes.
//	left_node_contribs_list: A list of Rank 2 tensors indicating the contribution of the left nodes when branching from parent nodes (given by the tensor element in the output node_ids_list) to the left direction by the given threshold for each feature. This value will be used to make the left node value by adding to the parent node value. Second dimension size is 1 for 1-dimensional logits, but would be larger for multi-class problems. See above for details like shapes and sizes.
//	right_node_contribs_list: A list of Rank 2 tensors, with the same shape/conditions as left_node_contribs_list, but just that the value is for the right node.
func BoostedTreesCalculateBestGainsPerFeature(scope *Scope, node_id_range tf.Output, stats_summary_list []tf.Output, l1 tf.Output, l2 tf.Output, tree_complexity tf.Output, min_node_weight tf.Output, max_splits int64) (node_ids_list []tf.Output, gains_list []tf.Output, thresholds_list []tf.Output, left_node_contribs_list []tf.Output, right_node_contribs_list []tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"max_splits": max_splits}
	opspec := tf.OpSpec{
		Type: "BoostedTreesCalculateBestGainsPerFeature",
		Input: []tf.Input{
			node_id_range, tf.OutputList(stats_summary_list), l1, l2, tree_complexity, min_node_weight,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	if scope.Err() != nil {
		return
	}
	var idx int
	var err error
	if node_ids_list, idx, err = makeOutputList(op, idx, "node_ids_list"); err != nil {
		scope.UpdateErr("BoostedTreesCalculateBestGainsPerFeature", err)
		return
	}
	if gains_list, idx, err = makeOutputList(op, idx, "gains_list"); err != nil {
		scope.UpdateErr("BoostedTreesCalculateBestGainsPerFeature", err)
		return
	}
	if thresholds_list, idx, err = makeOutputList(op, idx, "thresholds_list"); err != nil {
		scope.UpdateErr("BoostedTreesCalculateBestGainsPerFeature", err)
		return
	}
	if left_node_contribs_list, idx, err = makeOutputList(op, idx, "left_node_contribs_list"); err != nil {
		scope.UpdateErr("BoostedTreesCalculateBestGainsPerFeature", err)
		return
	}
	if right_node_contribs_list, idx, err = makeOutputList(op, idx, "right_node_contribs_list"); err != nil {
		scope.UpdateErr("BoostedTreesCalculateBestGainsPerFeature", err)
		return
	}
	return node_ids_list, gains_list, thresholds_list, left_node_contribs_list, right_node_contribs_list
}

// EncodePngAttr is an optional argument to EncodePng.
type EncodePngAttr func(optionalAttr)

// EncodePngCompression sets the optional compression attribute to value.
//
// value: Compression level.
// If not specified, defaults to -1
func EncodePngCompression(value int64) EncodePngAttr {
	return func(m optionalAttr) {
		m["compression"] = value
	}
}

// PNG-encode an image.
//
// `image` is a 3-D uint8 or uint16 Tensor of shape `[height, width, channels]`
// where `channels` is:
//
// *   1: for grayscale.
// *   2: for grayscale + alpha.
// *   3: for RGB.
// *   4: for RGBA.
//
// The ZLIB compression level, `compression`, can be -1 for the PNG-encoder
// default or a value from 0 to 9.  9 is the highest compression level, generating
// the smallest output, but is slower.
//
// Arguments:
//	image: 3-D with shape `[height, width, channels]`.
//
// Returns 0-D. PNG-encoded image.
func EncodePng(scope *Scope, image tf.Output, optional ...EncodePngAttr) (contents tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "EncodePng",
		Input: []tf.Input{
			image,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
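
// A hedged sketch of EncodePng with a non-default compression level.
// Placeholder is assumed from this package; the image would be fed at run
// time as a [height, width, channels] uint8 tensor.
func exampleEncodePng(s *Scope) tf.Output {
	image := Placeholder(s, tf.Uint8)
	// Compression level 9 trades encoding speed for the smallest output.
	return EncodePng(s, image, EncodePngCompression(9))
}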

// QueueDequeueUpToV2Attr is an optional argument to QueueDequeueUpToV2.
type QueueDequeueUpToV2Attr func(optionalAttr)

// QueueDequeueUpToV2TimeoutMs sets the optional timeout_ms attribute to value.
//
// value: If the queue has fewer than n elements, this operation
// will block for up to timeout_ms milliseconds.
// Note: This option is not supported yet.
// If not specified, defaults to -1
func QueueDequeueUpToV2TimeoutMs(value int64) QueueDequeueUpToV2Attr {
	return func(m optionalAttr) {
		m["timeout_ms"] = value
	}
}

// Dequeues `n` tuples of one or more tensors from the given queue.
//
// This operation is not supported by all queues.  If a queue does not support
// DequeueUpTo, then an Unimplemented error is returned.
//
// If the queue is closed and there are more than 0 but less than `n`
// elements remaining, then instead of returning an OutOfRange error like
// QueueDequeueMany, less than `n` elements are returned immediately.  If
// the queue is closed and there are 0 elements left in the queue, then
// an OutOfRange error is returned just like in QueueDequeueMany.
// Otherwise the behavior is identical to QueueDequeueMany:
//
// This operation concatenates queue-element component tensors along the
// 0th dimension to make a single component tensor.  All of the components
// in the dequeued tuple will have size n in the 0th dimension.
//
// This operation has `k` outputs, where `k` is the number of components in
// the tuples stored in the given queue, and output `i` is the ith
// component of the dequeued tuple.
//
// Arguments:
//	handle: The handle to a queue.
//	n: The number of tuples to dequeue.
//	component_types: The type of each component in a tuple.
//
// Returns One or more tensors that were dequeued as a tuple.
func QueueDequeueUpToV2(scope *Scope, handle tf.Output, n tf.Output, component_types []tf.DataType, optional ...QueueDequeueUpToV2Attr) (components []tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"component_types": component_types}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "QueueDequeueUpToV2",
		Input: []tf.Input{
			handle, n,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	if scope.Err() != nil {
		return
	}
	var idx int
	var err error
	if components, idx, err = makeOutputList(op, idx, "components"); err != nil {
		scope.UpdateErr("QueueDequeueUpToV2", err)
		return
	}
	return components
}
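
// A hedged sketch of pairing FIFOQueueV2 with QueueDequeueUpToV2. Only graph
// construction is shown; the enqueue side and session plumbing are omitted,
// and Const is assumed from this package. exampleQueueDequeue is illustrative.
func exampleQueueDequeue(s *Scope) []tf.Output {
	types := []tf.DataType{tf.Float}
	queue := FIFOQueueV2(s, types, FIFOQueueV2Capacity(10))
	n := Const(s, int32(4))
	// Returns up to 4 elements if the queue is closed with fewer remaining.
	return QueueDequeueUpToV2(s, queue, n, types)
}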

// Returns the max of x and y (i.e. x > y ? x : y) element-wise.
//
// *NOTE*: `Maximum` supports broadcasting. More about broadcasting
// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
func Maximum(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Maximum",
		Input: []tf.Input{
			x, y,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Returns element-wise remainder of division. This emulates C semantics in that
//
// the result here is consistent with a truncating divide. E.g.
// `tf.truncatediv(x, y) * y + truncate_mod(x, y) = x`.
//
// *NOTE*: `Mod` supports broadcasting. More about broadcasting
// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
func Mod(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Mod",
		Input: []tf.Input{
			x, y,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Returns element-wise remainder of division. This emulates C semantics in that
//
// the result here is consistent with a truncating divide. E.g. `truncate(x / y) *
// y + truncate_mod(x, y) = x`.
//
// *NOTE*: `TruncateMod` supports broadcasting. More about broadcasting
// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
func TruncateMod(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "TruncateMod",
		Input: []tf.Input{
			x, y,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
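
// Plain-Go sketch contrasting the two remainder conventions wrapped above:
// Go's % operator truncates toward zero, matching TruncateMod, while FloorMod
// follows a flooring divide. For example, -7 % 5 == -2, whereas
// floorModInt(-7, 5) == 3, consistent with floor(-7/5)*5 + 3 == -7.
// floorModInt is an illustrative helper, not part of the generated API.
func floorModInt(x, y int) int {
	m := x % y // truncated remainder; sign follows x
	if m != 0 && (m < 0) != (y < 0) {
		m += y // shift so the sign follows y, as FloorMod does
	}
	return m
}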

// Computes offsets of concat inputs within its output.
//
// For example:
//
// ```
// # 'x' is [2, 2, 7]
// # 'y' is [2, 3, 7]
// # 'z' is [2, 5, 7]
// concat_offset(2, [x, y, z]) => [0, 0, 0], [0, 2, 0], [0, 5, 0]
// ```
//
// This is typically used by gradient computations for a concat operation.
//
// Arguments:
//	concat_dim: The dimension along which to concatenate.
//	shape: The `N` int32 vectors representing the shapes of the tensors being concatenated.
//
// Returns The `N` int32 vectors representing the starting offset
// of input tensors within the concatenated output.
func ConcatOffset(scope *Scope, concat_dim tf.Output, shape []tf.Output) (offset []tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "ConcatOffset",
		Input: []tf.Input{
			concat_dim, tf.OutputList(shape),
		},
	}
	op := scope.AddOperation(opspec)
	if scope.Err() != nil {
		return
	}
	var idx int
	var err error
	if offset, idx, err = makeOutputList(op, idx, "offset"); err != nil {
		scope.UpdateErr("ConcatOffset", err)
		return
	}
	return offset
}

// LoadTPUEmbeddingRMSPropParametersGradAccumDebugAttr is an optional argument to LoadTPUEmbeddingRMSPropParametersGradAccumDebug.
type LoadTPUEmbeddingRMSPropParametersGradAccumDebugAttr func(optionalAttr)

// LoadTPUEmbeddingRMSPropParametersGradAccumDebugTableId sets the optional table_id attribute to value.
// If not specified, defaults to -1
//
// REQUIRES: value >= -1
func LoadTPUEmbeddingRMSPropParametersGradAccumDebugTableId(value int64) LoadTPUEmbeddingRMSPropParametersGradAccumDebugAttr {
	return func(m optionalAttr) {
		m["table_id"] = value
	}
}

// LoadTPUEmbeddingRMSPropParametersGradAccumDebugTableName sets the optional table_name attribute to value.
// If not specified, defaults to ""
func LoadTPUEmbeddingRMSPropParametersGradAccumDebugTableName(value string) LoadTPUEmbeddingRMSPropParametersGradAccumDebugAttr {
	return func(m optionalAttr) {
		m["table_name"] = value
	}
}

// Load RMSProp embedding parameters with debug support.
//
// An op that loads optimization parameters into HBM for embedding. Must be
// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
// embedding table configuration. For example, this op is used to install
// parameters that are loaded from a checkpoint before a training loop is
// executed.
//
// Arguments:
//	parameters: Value of parameters used in the RMSProp optimization algorithm.
//	ms: Value of ms used in the RMSProp optimization algorithm.
//	mom: Value of mom used in the RMSProp optimization algorithm.
//	gradient_accumulators: Value of gradient_accumulators used in the RMSProp optimization algorithm.
//
//
//
// Returns the created operation.
func LoadTPUEmbeddingRMSPropParametersGradAccumDebug(scope *Scope, parameters tf.Output, ms tf.Output, mom tf.Output, gradient_accumulators tf.Output, num_shards int64, shard_id int64, optional ...LoadTPUEmbeddingRMSPropParametersGradAccumDebugAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "LoadTPUEmbeddingRMSPropParametersGradAccumDebug",
		Input: []tf.Input{
			parameters, ms, mom, gradient_accumulators,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}

// Compute the lower regularized incomplete Gamma function `P(a, x)`.
//
// The lower regularized incomplete Gamma function is defined as:
//
//
// \\(P(a, x) = gamma(a, x) / Gamma(a) = 1 - Q(a, x)\\)
//
// where
//
// \\(gamma(a, x) = \int_{0}^{x} t^{a-1} exp(-t) dt\\)
//
// is the lower incomplete Gamma function.
//
// Note, above `Q(a, x)` (`Igammac`) is the upper regularized incomplete
// Gamma function.
func Igamma(scope *Scope, a tf.Output, x tf.Output) (z tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Igamma",
		Input: []tf.Input{
			a, x,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Compute the regularized incomplete beta integral \\(I_x(a, b)\\).
//
// The regularized incomplete beta integral is defined as:
//
//
// \\(I_x(a, b) = \frac{B(x; a, b)}{B(a, b)}\\)
//
// where
//
//
// \\(B(x; a, b) = \int_0^x t^{a-1} (1 - t)^{b-1} dt\\)
//
//
// is the incomplete beta function and \\(B(a, b)\\) is the *complete*
// beta function.
func Betainc(scope *Scope, a tf.Output, b tf.Output, x tf.Output) (z tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Betainc",
		Input: []tf.Input{
			a, b, x,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// ShapeAttr is an optional argument to Shape.
type ShapeAttr func(optionalAttr)

// ShapeOutType sets the optional out_type attribute to value.
// If not specified, defaults to DT_INT32
func ShapeOutType(value tf.DataType) ShapeAttr {
	return func(m optionalAttr) {
		m["out_type"] = value
	}
}

// Returns the shape of a tensor.
//
// This operation returns a 1-D integer tensor representing the shape of `input`.
//
// For example:
//
// ```
// # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
// shape(t) ==> [2, 2, 3]
// ```
func Shape(scope *Scope, input tf.Output, optional ...ShapeAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "Shape",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
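
// A minimal sketch of the functional-options pattern used for optional
// attributes throughout this package, shown with Shape's out_type. Const is
// assumed from this package; exampleShapeInt64 is illustrative only.
func exampleShapeInt64(s *Scope) tf.Output {
	t := Const(s, [][]int32{{1, 2, 3}, {4, 5, 6}})
	// Without options Shape yields DT_INT32; this requests int64 instead.
	return Shape(s, t, ShapeOutType(tf.Int64))
}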

// Computes fingerprints of the input strings.
//
// Arguments:
//	input: vector of strings to compute fingerprints on.
//
// Returns a (N,2) shaped matrix where N is the number of elements in the input
// vector. Each row contains the low and high parts of the fingerprint.
func SdcaFprint(scope *Scope, input tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "SdcaFprint",
		Input: []tf.Input{
			input,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Computes the power of one value to another.
//
// Given a tensor `x` and a tensor `y`, this operation computes \\(x^y\\) for
// corresponding elements in `x` and `y`. For example:
//
// ```
// # tensor 'x' is [[2, 2], [3, 3]]
// # tensor 'y' is [[8, 16], [2, 3]]
// tf.pow(x, y) ==> [[256, 65536], [9, 27]]
// ```
func Pow(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Pow",
		Input: []tf.Input{
			x, y,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// QuantizedReluXAttr is an optional argument to QuantizedReluX.
type QuantizedReluXAttr func(optionalAttr)

// QuantizedReluXOutType sets the optional out_type attribute to value.
// If not specified, defaults to DT_QUINT8
func QuantizedReluXOutType(value tf.DataType) QuantizedReluXAttr {
	return func(m optionalAttr) {
		m["out_type"] = value
	}
}

// Computes Quantized Rectified Linear X: `min(max(features, 0), max_value)`
//
// Arguments:
//
//
//	min_features: The float value that the lowest quantized value represents.
//	max_features: The float value that the highest quantized value represents.
//
// Returns:
//	activations: Has the same output shape as "features".
//	min_activations: The float value that the lowest quantized value represents.
//	max_activations: The float value that the highest quantized value represents.
func QuantizedReluX(scope *Scope, features tf.Output, max_value tf.Output, min_features tf.Output, max_features tf.Output, optional ...QuantizedReluXAttr) (activations tf.Output, min_activations tf.Output, max_activations tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "QuantizedReluX",
		Input: []tf.Input{
			features, max_value, min_features, max_features,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}

// Returns the truth value of (x < y) element-wise.
//
// *NOTE*: `Less` supports broadcasting. More about broadcasting
// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
func Less(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Less",
		Input: []tf.Input{
			x, y,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// RandomPoissonAttr is an optional argument to RandomPoisson.
type RandomPoissonAttr func(optionalAttr)

// RandomPoissonSeed sets the optional seed attribute to value.
// If not specified, defaults to 0
func RandomPoissonSeed(value int64) RandomPoissonAttr {
	return func(m optionalAttr) {
		m["seed"] = value
	}
}

// RandomPoissonSeed2 sets the optional seed2 attribute to value.
// If not specified, defaults to 0
func RandomPoissonSeed2(value int64) RandomPoissonAttr {
	return func(m optionalAttr) {
		m["seed2"] = value
	}
}

// Use RandomPoissonV2 instead.
//
// DEPRECATED at GraphDef version 25: Replaced by RandomPoissonV2
func RandomPoisson(scope *Scope, shape tf.Output, rate tf.Output, optional ...RandomPoissonAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "RandomPoisson",
		Input: []tf.Input{
			shape, rate,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Gets the next output from the given iterator.
//
// This operation is a synchronous version of IteratorGetNext. It should only be used
// in situations where the iterator does not block the calling thread, or where
// the calling thread is not a member of the thread pool used to execute parallel
// operations (e.g. in eager mode).
func IteratorGetNextSync(scope *Scope, iterator tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (components []tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
	opspec := tf.OpSpec{
		Type: "IteratorGetNextSync",
		Input: []tf.Input{
			iterator,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	if scope.Err() != nil {
		return
	}
	var idx int
	var err error
	if components, idx, err = makeOutputList(op, idx, "components"); err != nil {
		scope.UpdateErr("IteratorGetNextSync", err)
		return
	}
	return components
}

// Returns the truth value of (x >= y) element-wise.
//
// *NOTE*: `GreaterEqual` supports broadcasting. More about broadcasting
// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
func GreaterEqual(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "GreaterEqual",
		Input: []tf.Input{
			x, y,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// ApproximateEqualAttr is an optional argument to ApproximateEqual.
type ApproximateEqualAttr func(optionalAttr)

// ApproximateEqualTolerance sets the optional tolerance attribute to value.
// If not specified, defaults to 1e-05
func ApproximateEqualTolerance(value float32) ApproximateEqualAttr {
	return func(m optionalAttr) {
		m["tolerance"] = value
	}
}

// Returns the truth value of abs(x-y) < tolerance element-wise.
func ApproximateEqual(scope *Scope, x tf.Output, y tf.Output, optional ...ApproximateEqualAttr) (z tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ApproximateEqual",
		Input: []tf.Input{
			x, y,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Returns the truth value of x OR y element-wise.
//
// *NOTE*: `LogicalOr` supports broadcasting. More about broadcasting
// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
func LogicalOr(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "LogicalOr",
		Input: []tf.Input{
			x, y,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// MatMulAttr is an optional argument to MatMul.
type MatMulAttr func(optionalAttr)

// MatMulTransposeA sets the optional transpose_a attribute to value.
//
// value: If true, "a" is transposed before multiplication.
// If not specified, defaults to false
func MatMulTransposeA(value bool) MatMulAttr {
	return func(m optionalAttr) {
		m["transpose_a"] = value
	}
}

// MatMulTransposeB sets the optional transpose_b attribute to value.
//
// value: If true, "b" is transposed before multiplication.
// If not specified, defaults to false
func MatMulTransposeB(value bool) MatMulAttr {
	return func(m optionalAttr) {
		m["transpose_b"] = value
	}
}

// Multiply the matrix "a" by the matrix "b".
//
// The inputs must be two-dimensional matrices and the inner dimension of
// "a" (after being transposed if transpose_a is true) must match the
// outer dimension of "b" (after being transposed if transpose_b is
// true).
//
// *Note*: The default kernel implementation for MatMul on GPUs uses
// cuBLAS.
func MatMul(scope *Scope, a tf.Output, b tf.Output, optional ...MatMulAttr) (product tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "MatMul",
		Input: []tf.Input{
			a, b,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
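
// A hedged sketch of MatMul with a transposed first operand. Const is
// assumed from this package; shapes are chosen so transpose_a makes the
// inner dimensions agree.
func exampleMatMul(s *Scope) tf.Output {
	a := Const(s, [][]float32{{1, 2}, {3, 4}, {5, 6}})          // 3x2
	b := Const(s, [][]float32{{1, 0, 1}, {0, 1, 1}, {1, 1, 0}}) // 3x3
	// "a" is transposed to 2x3 before multiplication, giving a 2x3 product.
	return MatMul(s, a, b, MatMulTransposeA(true))
}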

// InitializeTableFromTextFileV2Attr is an optional argument to InitializeTableFromTextFileV2.
type InitializeTableFromTextFileV2Attr func(optionalAttr)

// InitializeTableFromTextFileV2VocabSize sets the optional vocab_size attribute to value.
//
// value: Number of elements of the file, use -1 if unknown.
// If not specified, defaults to -1
//
// REQUIRES: value >= -1
func InitializeTableFromTextFileV2VocabSize(value int64) InitializeTableFromTextFileV2Attr {
	return func(m optionalAttr) {
		m["vocab_size"] = value
	}
}

// InitializeTableFromTextFileV2Delimiter sets the optional delimiter attribute to value.
//
// value: Delimiter to separate fields in a line.
// If not specified, defaults to "\t"
func InitializeTableFromTextFileV2Delimiter(value string) InitializeTableFromTextFileV2Attr {
	return func(m optionalAttr) {
		m["delimiter"] = value
	}
}

// Initializes a table from a text file.
//
// It inserts one key-value pair into the table for each line of the file.
// The key and value are extracted from the whole line content, elements from the
// split line based on `delimiter`, or the line number (starting from zero).
// Where to extract the key and value from a line is specified by `key_index` and
// `value_index`.
//
// - A value of -1 means use the line number (starting from zero), expects `int64`.
// - A value of -2 means use the whole line content, expects `string`.
// - A value >= 0 means use the index (starting at zero) of the split line based
//   on `delimiter`.
//
// Arguments:
//	table_handle: Handle to a table which will be initialized.
//	filename: Filename of a vocabulary text file.
//	key_index: Column index in a line to get the table `key` values from.
//	value_index: Column index that represents information of a line to get the table
// `value` values from.
//
// Returns the created operation.
func InitializeTableFromTextFileV2(scope *Scope, table_handle tf.Output, filename tf.Output, key_index int64, value_index int64, optional ...InitializeTableFromTextFileV2Attr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"key_index": key_index, "value_index": value_index}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "InitializeTableFromTextFileV2",
		Input: []tf.Input{
			table_handle, filename,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}

// MeanAttr is an optional argument to Mean.
type MeanAttr func(optionalAttr)

// MeanKeepDims sets the optional keep_dims attribute to value.
//
// value: If true, retain reduced dimensions with length 1.
// If not specified, defaults to false
func MeanKeepDims(value bool) MeanAttr {
	return func(m optionalAttr) {
		m["keep_dims"] = value
	}
}

// Computes the mean of elements across dimensions of a tensor.
//
// Reduces `input` along the dimensions given in `axis`. Unless
// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
// `axis`. If `keep_dims` is true, the reduced dimensions are
// retained with length 1.
//
// Arguments:
//	input: The tensor to reduce.
//	axis: The dimensions to reduce. Must be in the range
// `[-rank(input), rank(input))`.
//
// Returns The reduced tensor.
func Mean(scope *Scope, input tf.Output, axis tf.Output, optional ...MeanAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "Mean",
		Input: []tf.Input{
			input, axis,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
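
// A short sketch of Mean under the reduction contract documented above.
// Const is assumed from this package; exampleMean is illustrative only.
func exampleMean(s *Scope) tf.Output {
	t := Const(s, [][]float32{{1, 2}, {3, 4}})
	axis := Const(s, int32(1))
	// Reduces across columns; keep_dims retains a trailing length-1 axis,
	// so the result has shape [2, 1] with values [[1.5], [3.5]].
	return Mean(s, t, axis, MeanKeepDims(true))
}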

// ProdAttr is an optional argument to Prod.
type ProdAttr func(optionalAttr)

// ProdKeepDims sets the optional keep_dims attribute to value.
//
// value: If true, retain reduced dimensions with length 1.
// If not specified, defaults to false
func ProdKeepDims(value bool) ProdAttr {
	return func(m optionalAttr) {
		m["keep_dims"] = value
	}
}

// Computes the product of elements across dimensions of a tensor.
//
// Reduces `input` along the dimensions given in `axis`. Unless
// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
// `axis`. If `keep_dims` is true, the reduced dimensions are
// retained with length 1.
//
// Arguments:
//	input: The tensor to reduce.
//	axis: The dimensions to reduce. Must be in the range
// `[-rank(input), rank(input))`.
//
// Returns The reduced tensor.
func Prod(scope *Scope, input tf.Output, axis tf.Output, optional ...ProdAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "Prod",
		Input: []tf.Input{
			input, axis,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// ResizeBilinearAttr is an optional argument to ResizeBilinear.
type ResizeBilinearAttr func(optionalAttr)

// ResizeBilinearAlignCorners sets the optional align_corners attribute to value.
//
// value: If true, the centers of the 4 corner pixels of the input and output tensors are
// aligned, preserving the values at the corner pixels. Defaults to false.
// If not specified, defaults to false
func ResizeBilinearAlignCorners(value bool) ResizeBilinearAttr {
	return func(m optionalAttr) {
		m["align_corners"] = value
	}
}

// ResizeBilinearHalfPixelCenters sets the optional half_pixel_centers attribute to value.
// If not specified, defaults to false
func ResizeBilinearHalfPixelCenters(value bool) ResizeBilinearAttr {
	return func(m optionalAttr) {
		m["half_pixel_centers"] = value
	}
}

// Resize `images` to `size` using bilinear interpolation.
//
// Input images can be of different types but output images are always float.
//
// Arguments:
//	images: 4-D with shape `[batch, height, width, channels]`.
//	size: A 1-D int32 Tensor of 2 elements: `new_height, new_width`.  The
// new size for the images.
//
// Returns 4-D with shape
// `[batch, new_height, new_width, channels]`.
func ResizeBilinear(scope *Scope, images tf.Output, size tf.Output, optional ...ResizeBilinearAttr) (resized_images tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ResizeBilinear",
		Input: []tf.Input{
			images, size,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// MaxAttr is an optional argument to Max.
type MaxAttr func(optionalAttr)

// MaxKeepDims sets the optional keep_dims attribute to value.
//
// value: If true, retain reduced dimensions with length 1.
// If not specified, defaults to false
func MaxKeepDims(value bool) MaxAttr {
	return func(m optionalAttr) {
		m["keep_dims"] = value
	}
}

// Computes the maximum of elements across dimensions of a tensor.
//
// Reduces `input` along the dimensions given in `axis`. Unless
// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
// `axis`. If `keep_dims` is true, the reduced dimensions are
// retained with length 1.
//
// Arguments:
//	input: The tensor to reduce.
//	axis: The dimensions to reduce. Must be in the range
// `[-rank(input), rank(input))`.
//
// Returns The reduced tensor.
func Max(scope *Scope, input tf.Output, axis tf.Output, optional ...MaxAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "Max",
		Input: []tf.Input{
			input, axis,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Creates a dataset that contains the unique elements of `input_dataset`.
func ExperimentalUniqueDataset(scope *Scope, input_dataset tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
	opspec := tf.OpSpec{
		Type: "ExperimentalUniqueDataset",
		Input: []tf.Input{
			input_dataset,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// ArgMinAttr is an optional argument to ArgMin.
type ArgMinAttr func(optionalAttr)

// ArgMinOutputType sets the optional output_type attribute to value.
// If not specified, defaults to DT_INT64
func ArgMinOutputType(value tf.DataType) ArgMinAttr {
	return func(m optionalAttr) {
		m["output_type"] = value
	}
}

// Returns the index with the smallest value across dimensions of a tensor.
//
// Note that in case of ties the identity of the return value is not guaranteed.
//
// Arguments:
//
//	dimension: int32 or int64, must be in the range `[-rank(input), rank(input))`.
// Describes which dimension of the input Tensor to reduce across. For vectors,
// use dimension = 0.
func ArgMin(scope *Scope, input tf.Output, dimension tf.Output, optional ...ArgMinAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ArgMin",
		Input: []tf.Input{
			input, dimension,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Converts the quantized `input` tensor into a lower-precision `output`.
//
// Converts the quantized `input` tensor into a lower-precision `output`, using the
// output range specified with `requested_output_min` and `requested_output_max`.
//
// `[input_min, input_max]` are scalar floats that specify the range for the float
// interpretation of the `input` data. For example, if `input_min` is -1.0f and
// `input_max` is 1.0f, and we are dealing with `quint16` quantized data, then a 0
// value in the 16-bit data should be interpreted as -1.0f, and a 65535 means 1.0f.
//
// Arguments:
//
//	input_min: The float value that the minimum quantized input value represents.
//	input_max: The float value that the maximum quantized input value represents.
//	requested_output_min: The float value that the minimum quantized output value represents.
//	requested_output_max: The float value that the maximum quantized output value represents.
//	out_type: The type of the output. Should be a lower bit depth than Tinput.
//
// Returns:
//	output
//	output_min: The requested_output_min value is copied into this output.
//	output_max: The requested_output_max value is copied into this output.
func Requantize(scope *Scope, input tf.Output, input_min tf.Output, input_max tf.Output, requested_output_min tf.Output, requested_output_max tf.Output, out_type tf.DataType) (output tf.Output, output_min tf.Output, output_max tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"out_type": out_type}
	opspec := tf.OpSpec{
		Type: "Requantize",
		Input: []tf.Input{
			input, input_min, input_max, requested_output_min, requested_output_max,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}

// Creates a dataset that emits the lines of one or more text files.
//
// Arguments:
//	filenames: A scalar or a vector containing the name(s) of the file(s) to be
// read.
//	compression_type: A scalar containing either (i) the empty string (no
// compression), (ii) "ZLIB", or (iii) "GZIP".
//	buffer_size: A scalar containing the number of bytes to buffer.
func TextLineDataset(scope *Scope, filenames tf.Output, compression_type tf.Output, buffer_size tf.Output) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "TextLineDataset",
		Input: []tf.Input{
			filenames, compression_type, buffer_size,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
32438
32439// Computes the sum along segments of a tensor.
32440//
32441// Read
32442// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)
32443// for an explanation of segments.
32444//
32445// Computes a tensor such that
32446// \\(output_i = \sum_j data_j\\) where sum is over `j` such
32447// that `segment_ids[j] == i`.
32448//
32449// If the sum is empty for a given segment ID `i`, `output[i] = 0`.
32450//
32451// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
32452// <img style="width:100%" src="https://www.tensorflow.org/images/SegmentSum.png" alt>
32453// </div>
32454//
32455// For example:
32456//
32457// ```
32458// c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]])
32459// tf.segment_sum(c, tf.constant([0, 0, 1]))
32460// # ==> [[5, 5, 5, 5],
32461// #      [5, 6, 7, 8]]
32462// ```
32463//
32464//
32465// Arguments:
32466//
32467//	segment_ids: A 1-D tensor whose size is equal to the size of `data`'s
32468// first dimension.  Values should be sorted and can be repeated.
32469//
32470// Returns Has same shape as data, except for dimension 0 which
32471// has size `k`, the number of segments.
32472func SegmentSum(scope *Scope, data tf.Output, segment_ids tf.Output) (output tf.Output) {
32473	if scope.Err() != nil {
32474		return
32475	}
32476	opspec := tf.OpSpec{
32477		Type: "SegmentSum",
32478		Input: []tf.Input{
32479			data, segment_ids,
32480		},
32481	}
32482	op := scope.AddOperation(opspec)
32483	return op.Output(0)
32484}
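
// segmentSumSketch is a hedged Go rendering of the Python example above, using
// this package's Const helper; the commented value mirrors the documented result.
func segmentSumSketch(s *Scope) tf.Output {
	c := Const(s, [][]int32{{1, 2, 3, 4}, {4, 3, 2, 1}, {5, 6, 7, 8}})
	ids := Const(s, []int32{0, 0, 1}) // sorted segment ids
	return SegmentSum(s, c, ids)      // => [[5 5 5 5] [5 6 7 8]]
}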
32485
32486// Computes the mean along segments of a tensor.
32487//
32488// Read
32489// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)
32490// for an explanation of segments.
32491//
32492// Computes a tensor such that
32493// \\(output_i = \frac{\sum_j data_j}{N}\\) where `mean` is
32494// over `j` such that `segment_ids[j] == i` and `N` is the total number of
32495// values summed.
32496//
32497// If the mean is empty for a given segment ID `i`, `output[i] = 0`.
32498//
32499// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
32500// <img style="width:100%" src="https://www.tensorflow.org/images/SegmentMean.png" alt>
32501// </div>
32502//
32503// For example:
32504//
32505// ```
32506// c = tf.constant([[1.0,2,3,4], [4, 3, 2, 1], [5,6,7,8]])
32507// tf.segment_mean(c, tf.constant([0, 0, 1]))
32508// # ==> [[2.5, 2.5, 2.5, 2.5],
32509// #      [5, 6, 7, 8]]
32510// ```
32511//
32512//
32513// Arguments:
32514//
32515//	segment_ids: A 1-D tensor whose size is equal to the size of `data`'s
32516// first dimension.  Values should be sorted and can be repeated.
32517//
32518// Returns Has same shape as data, except for dimension 0 which
32519// has size `k`, the number of segments.
32520func SegmentMean(scope *Scope, data tf.Output, segment_ids tf.Output) (output tf.Output) {
32521	if scope.Err() != nil {
32522		return
32523	}
32524	opspec := tf.OpSpec{
32525		Type: "SegmentMean",
32526		Input: []tf.Input{
32527			data, segment_ids,
32528		},
32529	}
32530	op := scope.AddOperation(opspec)
32531	return op.Output(0)
32532}
32533
32534// Computes the minimum along segments of a tensor.
32535//
32536// Read
32537// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)
32538// for an explanation of segments.
32539//
32540// Computes a tensor such that
32541// \\(output_i = \min_j(data_j)\\) where `min` is over `j` such
32542// that `segment_ids[j] == i`.
32543//
32544// If the min is empty for a given segment ID `i`, `output[i] = 0`.
32545//
32546// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
32547// <img style="width:100%" src="https://www.tensorflow.org/images/SegmentMin.png" alt>
32548// </div>
32549//
32550// For example:
32551//
32552// ```
32553// c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]])
32554// tf.segment_min(c, tf.constant([0, 0, 1]))
32555// # ==> [[1, 2, 2, 1],
32556// #      [5, 6, 7, 8]]
32557// ```
32558//
32559// Arguments:
32560//
32561//	segment_ids: A 1-D tensor whose size is equal to the size of `data`'s
32562// first dimension.  Values should be sorted and can be repeated.
32563//
32564// Returns Has same shape as data, except for dimension 0 which
32565// has size `k`, the number of segments.
32566func SegmentMin(scope *Scope, data tf.Output, segment_ids tf.Output) (output tf.Output) {
32567	if scope.Err() != nil {
32568		return
32569	}
32570	opspec := tf.OpSpec{
32571		Type: "SegmentMin",
32572		Input: []tf.Input{
32573			data, segment_ids,
32574		},
32575	}
32576	op := scope.AddOperation(opspec)
32577	return op.Output(0)
32578}
32579
32580// Computes the sum along segments of a tensor.
32581//
32582// Read
32583// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)
32584// for an explanation of segments.
32585//
32586// Computes a tensor such that
32587// \\(output[i] = \sum_{j...} data[j...]\\) where the sum is over tuples `j...` such
32588// that `segment_ids[j...] == i`.  Unlike `SegmentSum`, `segment_ids`
32589// need not be sorted and need not cover all values in the full
32590// range of valid values.
32591//
32592// If the sum is empty for a given segment ID `i`, `output[i] = 0`.
32593// If the given segment ID `i` is negative, the value is dropped and will not be
32594// added to the sum of the segment.
32595//
32596// `num_segments` should equal the number of distinct segment IDs.
32597//
32598// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
32599// <img style="width:100%" src="https://www.tensorflow.org/images/UnsortedSegmentSum.png" alt>
32600// </div>
32601//
32602// ``` python
32603// c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]])
32604// tf.unsorted_segment_sum(c, tf.constant([0, 1, 0]), num_segments=2)
32605// # ==> [[ 5,  5, 5, 5],
32606// #       [5,  6, 7, 8]]
32607// ```
32608//
32609//
32610// Arguments:
32611//
32612//	segment_ids: A tensor whose shape is a prefix of `data.shape`.
32613//
32614//
32615// Returns Has same shape as data, except for the first `segment_ids.rank`
32616// dimensions, which are replaced with a single dimension which has size
32617// `num_segments`.
32618func UnsortedSegmentSum(scope *Scope, data tf.Output, segment_ids tf.Output, num_segments tf.Output) (output tf.Output) {
32619	if scope.Err() != nil {
32620		return
32621	}
32622	opspec := tf.OpSpec{
32623		Type: "UnsortedSegmentSum",
32624		Input: []tf.Input{
32625			data, segment_ids, num_segments,
32626		},
32627	}
32628	op := scope.AddOperation(opspec)
32629	return op.Output(0)
32630}
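
// unsortedSegmentSumSketch is a hedged Go rendering of the Python example
// above: segment ids need not be sorted, and num_segments is a scalar tensor.
func unsortedSegmentSumSketch(s *Scope) tf.Output {
	c := Const(s, [][]int32{{1, 2, 3, 4}, {5, 6, 7, 8}, {4, 3, 2, 1}})
	ids := Const(s, []int32{0, 1, 0})       // unsorted ids are fine here
	n := Const(s, int32(2))                 // number of distinct segment ids
	return UnsortedSegmentSum(s, c, ids, n) // => [[5 5 5 5] [5 6 7 8]]
}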
32631
32632// Computes the product along segments of a tensor.
32633//
32634// Read
32635// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)
32636// for an explanation of segments.
32637//
// This operator is similar to the unsorted segment sum operator,
// `UnsortedSegmentSum`.
32640// Instead of computing the sum over segments, it computes the product of all
32641// entries belonging to a segment such that:
32642//
32643// \\(output_i = \prod_{j...} data[j...]\\) where the product is over tuples
32644// `j...` such that `segment_ids[j...] == i`.
32645//
32646// For example:
32647//
32648// ``` python
32649// c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]])
32650// tf.unsorted_segment_prod(c, tf.constant([0, 1, 0]), num_segments=2)
32651// # ==> [[ 4,  6, 6, 4],
32652// #       [5,  6, 7, 8]]
32653// ```
32654//
32655// If there is no entry for a given segment ID `i`, it outputs 1.
32656//
32657// If the given segment ID `i` is negative, then the corresponding value is
32658// dropped, and will not be included in the result.
32659//
32660// Arguments:
32661//
32662//	segment_ids: A tensor whose shape is a prefix of `data.shape`.
32663//
32664//
32665// Returns Has same shape as data, except for the first `segment_ids.rank`
32666// dimensions, which are replaced with a single dimension which has size
32667// `num_segments`.
32668func UnsortedSegmentProd(scope *Scope, data tf.Output, segment_ids tf.Output, num_segments tf.Output) (output tf.Output) {
32669	if scope.Err() != nil {
32670		return
32671	}
32672	opspec := tf.OpSpec{
32673		Type: "UnsortedSegmentProd",
32674		Input: []tf.Input{
32675			data, segment_ids, num_segments,
32676		},
32677	}
32678	op := scope.AddOperation(opspec)
32679	return op.Output(0)
32680}
32681
32682// Computes hyperbolic cosine of x element-wise.
32683func Cosh(scope *Scope, x tf.Output) (y tf.Output) {
32684	if scope.Err() != nil {
32685		return
32686	}
32687	opspec := tf.OpSpec{
32688		Type: "Cosh",
32689		Input: []tf.Input{
32690			x,
32691		},
32692	}
32693	op := scope.AddOperation(opspec)
32694	return op.Output(0)
32695}
32696
32697// Computes the mean along sparse segments of a tensor.
32698//
// Like `SparseSegmentMean`, but allows missing ids in `segment_ids`. If an id is
// missing, the `output` tensor at that position will be zeroed.
32701//
32702// Read
32703// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)
32704// for an explanation of segments.
32705//
32706// Arguments:
32707//
32708//	indices: A 1-D tensor. Has same rank as `segment_ids`.
32709//	segment_ids: A 1-D tensor. Values should be sorted and can be repeated.
32710//	num_segments: Should equal the number of distinct segment IDs.
32711//
32712// Returns Has same shape as data, except for dimension 0 which has size
32713// `num_segments`.
32714func SparseSegmentMeanWithNumSegments(scope *Scope, data tf.Output, indices tf.Output, segment_ids tf.Output, num_segments tf.Output) (output tf.Output) {
32715	if scope.Err() != nil {
32716		return
32717	}
32718	opspec := tf.OpSpec{
32719		Type: "SparseSegmentMeanWithNumSegments",
32720		Input: []tf.Input{
32721			data, indices, segment_ids, num_segments,
32722		},
32723	}
32724	op := scope.AddOperation(opspec)
32725	return op.Output(0)
32726}
32727
32728// CudnnRNNParamsSizeAttr is an optional argument to CudnnRNNParamsSize.
32729type CudnnRNNParamsSizeAttr func(optionalAttr)
32730
32731// CudnnRNNParamsSizeRnnMode sets the optional rnn_mode attribute to value.
32732// If not specified, defaults to "lstm"
32733func CudnnRNNParamsSizeRnnMode(value string) CudnnRNNParamsSizeAttr {
32734	return func(m optionalAttr) {
32735		m["rnn_mode"] = value
32736	}
32737}
32738
32739// CudnnRNNParamsSizeInputMode sets the optional input_mode attribute to value.
32740// If not specified, defaults to "linear_input"
32741func CudnnRNNParamsSizeInputMode(value string) CudnnRNNParamsSizeAttr {
32742	return func(m optionalAttr) {
32743		m["input_mode"] = value
32744	}
32745}
32746
32747// CudnnRNNParamsSizeDirection sets the optional direction attribute to value.
32748// If not specified, defaults to "unidirectional"
32749func CudnnRNNParamsSizeDirection(value string) CudnnRNNParamsSizeAttr {
32750	return func(m optionalAttr) {
32751		m["direction"] = value
32752	}
32753}
32754
32755// CudnnRNNParamsSizeDropout sets the optional dropout attribute to value.
32756// If not specified, defaults to 0
32757func CudnnRNNParamsSizeDropout(value float32) CudnnRNNParamsSizeAttr {
32758	return func(m optionalAttr) {
32759		m["dropout"] = value
32760	}
32761}
32762
32763// CudnnRNNParamsSizeSeed sets the optional seed attribute to value.
32764// If not specified, defaults to 0
32765func CudnnRNNParamsSizeSeed(value int64) CudnnRNNParamsSizeAttr {
32766	return func(m optionalAttr) {
32767		m["seed"] = value
32768	}
32769}
32770
32771// CudnnRNNParamsSizeSeed2 sets the optional seed2 attribute to value.
32772// If not specified, defaults to 0
32773func CudnnRNNParamsSizeSeed2(value int64) CudnnRNNParamsSizeAttr {
32774	return func(m optionalAttr) {
32775		m["seed2"] = value
32776	}
32777}
32778
32779// Computes size of weights that can be used by a Cudnn RNN model.
32780//
32781// Return the params size that can be used by the Cudnn RNN model. Subsequent
32782// weight allocation and initialization should use this size.
32783//
32784// num_layers: Specifies the number of layers in the RNN model.
32785// num_units: Specifies the size of the hidden state.
32786// input_size: Specifies the size of the input state.
32787// rnn_mode: Indicates the type of the RNN model.
// input_mode: Indicates whether there is a linear projection between the input and
//   the actual computation before the first layer. 'skip_input' is only allowed
//   when input_size == num_units; 'auto_select' implies 'skip_input' when
//   input_size == num_units; otherwise, it implies 'linear_input'.
32792// direction: Indicates whether a bidirectional model will be used.
32793//   dir = (direction == bidirectional) ? 2 : 1
32794// dropout: dropout probability. When set to 0., dropout is disabled.
32795// seed: the 1st part of a seed to initialize dropout.
32796// seed2: the 2nd part of a seed to initialize dropout.
32797// params_size: The size of the params buffer that should be allocated and
32798//   initialized for this RNN model. Note that this params buffer may not be
32799//   compatible across GPUs. Please use CudnnRNNParamsWeights and
32800//   CudnnRNNParamsBiases to save and restore them in a way that is compatible
32801//   across different runs.
32802func CudnnRNNParamsSize(scope *Scope, num_layers tf.Output, num_units tf.Output, input_size tf.Output, T tf.DataType, S tf.DataType, optional ...CudnnRNNParamsSizeAttr) (params_size tf.Output) {
32803	if scope.Err() != nil {
32804		return
32805	}
32806	attrs := map[string]interface{}{"T": T, "S": S}
32807	for _, a := range optional {
32808		a(attrs)
32809	}
32810	opspec := tf.OpSpec{
32811		Type: "CudnnRNNParamsSize",
32812		Input: []tf.Input{
32813			num_layers, num_units, input_size,
32814		},
32815		Attrs: attrs,
32816	}
32817	op := scope.AddOperation(opspec)
32818	return op.Output(0)
32819}
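
// cudnnRNNParamsSizeSketch is a hedged sketch of the functional-option pattern
// used for optional attributes throughout this package: each
// CudnnRNNParamsSize*Attr value fills in the attribute map before the op is
// added. The "gru" rnn_mode string is an assumption based on the op's
// upstream documentation.
func cudnnRNNParamsSizeSketch(s *Scope) tf.Output {
	layers := Const(s, int32(2))
	units := Const(s, int32(128))
	inputSize := Const(s, int32(64))
	// T is the weight dtype; S is the dtype of the shape inputs above.
	return CudnnRNNParamsSize(s, layers, units, inputSize, tf.Float, tf.Int32,
		CudnnRNNParamsSizeRnnMode("gru"),
		CudnnRNNParamsSizeDropout(0.1))
}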
32820
32821// Computes gradients for SparseSegmentMean.
32822//
32823// Returns tensor "output" with same shape as grad, except for dimension 0 whose
32824// value is output_dim0.
32825//
32826// Arguments:
32827//	grad: gradient propagated to the SparseSegmentMean op.
32828//	indices: indices passed to the corresponding SparseSegmentMean op.
32829//	segment_ids: segment_ids passed to the corresponding SparseSegmentMean op.
32830//	output_dim0: dimension 0 of "data" passed to SparseSegmentMean op.
32831func SparseSegmentMeanGrad(scope *Scope, grad tf.Output, indices tf.Output, segment_ids tf.Output, output_dim0 tf.Output) (output tf.Output) {
32832	if scope.Err() != nil {
32833		return
32834	}
32835	opspec := tf.OpSpec{
32836		Type: "SparseSegmentMeanGrad",
32837		Input: []tf.Input{
32838			grad, indices, segment_ids, output_dim0,
32839		},
32840	}
32841	op := scope.AddOperation(opspec)
32842	return op.Output(0)
32843}
32844
32845// Computes the sum along sparse segments of a tensor divided by the sqrt of N.
32846//
32847// N is the size of the segment being reduced.
32848//
32849// See `tf.sparse.segment_sum` for usage examples.
32850//
32851//
32852// Arguments:
32853//
32854//	indices: A 1-D tensor. Has same rank as `segment_ids`.
32855//	segment_ids: A 1-D tensor. Values should be sorted and can be repeated.
32856//
32857// Returns Has same shape as data, except for dimension 0 which
32858// has size `k`, the number of segments.
32859func SparseSegmentSqrtN(scope *Scope, data tf.Output, indices tf.Output, segment_ids tf.Output) (output tf.Output) {
32860	if scope.Err() != nil {
32861		return
32862	}
32863	opspec := tf.OpSpec{
32864		Type: "SparseSegmentSqrtN",
32865		Input: []tf.Input{
32866			data, indices, segment_ids,
32867		},
32868	}
32869	op := scope.AddOperation(opspec)
32870	return op.Output(0)
32871}
32872
32873// Compute the upper regularized incomplete Gamma function `Q(a, x)`.
32874//
32875// The upper regularized incomplete Gamma function is defined as:
32876//
32877// \\(Q(a, x) = Gamma(a, x) / Gamma(a) = 1 - P(a, x)\\)
32878//
32879// where
32880//
// \\(\Gamma(a, x) = \int_{x}^{\infty} t^{a-1} e^{-t} dt\\)
//
// is the upper incomplete Gamma function.
//
// Note that above, `P(a, x)` (`Igamma`) is the lower regularized incomplete
// Gamma function.
32887func Igammac(scope *Scope, a tf.Output, x tf.Output) (z tf.Output) {
32888	if scope.Err() != nil {
32889		return
32890	}
32891	opspec := tf.OpSpec{
32892		Type: "Igammac",
32893		Input: []tf.Input{
32894			a, x,
32895		},
32896	}
32897	op := scope.AddOperation(opspec)
32898	return op.Output(0)
32899}
32900
32901// Computes the sum along sparse segments of a tensor divided by the sqrt of N.
32902//
32903// N is the size of the segment being reduced.
32904//
32905// Like `SparseSegmentSqrtN`, but allows missing ids in `segment_ids`. If an id is
// missing, the `output` tensor at that position will be zeroed.
32907//
32908// Read
32909// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)
32910// for an explanation of segments.
32911//
32912// Arguments:
32913//
32914//	indices: A 1-D tensor. Has same rank as `segment_ids`.
32915//	segment_ids: A 1-D tensor. Values should be sorted and can be repeated.
32916//	num_segments: Should equal the number of distinct segment IDs.
32917//
32918// Returns Has same shape as data, except for dimension 0 which
32919// has size `k`, the number of segments.
32920func SparseSegmentSqrtNWithNumSegments(scope *Scope, data tf.Output, indices tf.Output, segment_ids tf.Output, num_segments tf.Output) (output tf.Output) {
32921	if scope.Err() != nil {
32922		return
32923	}
32924	opspec := tf.OpSpec{
32925		Type: "SparseSegmentSqrtNWithNumSegments",
32926		Input: []tf.Input{
32927			data, indices, segment_ids, num_segments,
32928		},
32929	}
32930	op := scope.AddOperation(opspec)
32931	return op.Output(0)
32932}
32933
32934// Computes gradients for SparseSegmentSqrtN.
32935//
32936// Returns tensor "output" with same shape as grad, except for dimension 0 whose
32937// value is output_dim0.
32938//
32939// Arguments:
32940//	grad: gradient propagated to the SparseSegmentSqrtN op.
32941//	indices: indices passed to the corresponding SparseSegmentSqrtN op.
32942//	segment_ids: segment_ids passed to the corresponding SparseSegmentSqrtN op.
32943//	output_dim0: dimension 0 of "data" passed to SparseSegmentSqrtN op.
32944func SparseSegmentSqrtNGrad(scope *Scope, grad tf.Output, indices tf.Output, segment_ids tf.Output, output_dim0 tf.Output) (output tf.Output) {
32945	if scope.Err() != nil {
32946		return
32947	}
32948	opspec := tf.OpSpec{
32949		Type: "SparseSegmentSqrtNGrad",
32950		Input: []tf.Input{
32951			grad, indices, segment_ids, output_dim0,
32952		},
32953	}
32954	op := scope.AddOperation(opspec)
32955	return op.Output(0)
32956}
32957
32958// LRNGradAttr is an optional argument to LRNGrad.
32959type LRNGradAttr func(optionalAttr)
32960
32961// LRNGradDepthRadius sets the optional depth_radius attribute to value.
32962//
32963// value: A depth radius.
32964// If not specified, defaults to 5
32965func LRNGradDepthRadius(value int64) LRNGradAttr {
32966	return func(m optionalAttr) {
32967		m["depth_radius"] = value
32968	}
32969}
32970
32971// LRNGradBias sets the optional bias attribute to value.
32972//
32973// value: An offset (usually > 0 to avoid dividing by 0).
32974// If not specified, defaults to 1
32975func LRNGradBias(value float32) LRNGradAttr {
32976	return func(m optionalAttr) {
32977		m["bias"] = value
32978	}
32979}
32980
32981// LRNGradAlpha sets the optional alpha attribute to value.
32982//
32983// value: A scale factor, usually positive.
32984// If not specified, defaults to 1
32985func LRNGradAlpha(value float32) LRNGradAttr {
32986	return func(m optionalAttr) {
32987		m["alpha"] = value
32988	}
32989}
32990
32991// LRNGradBeta sets the optional beta attribute to value.
32992//
32993// value: An exponent.
32994// If not specified, defaults to 0.5
32995func LRNGradBeta(value float32) LRNGradAttr {
32996	return func(m optionalAttr) {
32997		m["beta"] = value
32998	}
32999}
33000
33001// Gradients for Local Response Normalization.
33002//
33003// Arguments:
33004//	input_grads: 4-D with shape `[batch, height, width, channels]`.
33005//	input_image: 4-D with shape `[batch, height, width, channels]`.
33006//	output_image: 4-D with shape `[batch, height, width, channels]`.
33007//
33008// Returns The gradients for LRN.
33009func LRNGrad(scope *Scope, input_grads tf.Output, input_image tf.Output, output_image tf.Output, optional ...LRNGradAttr) (output tf.Output) {
33010	if scope.Err() != nil {
33011		return
33012	}
33013	attrs := map[string]interface{}{}
33014	for _, a := range optional {
33015		a(attrs)
33016	}
33017	opspec := tf.OpSpec{
33018		Type: "LRNGrad",
33019		Input: []tf.Input{
33020			input_grads, input_image, output_image,
33021		},
33022		Attrs: attrs,
33023	}
33024	op := scope.AddOperation(opspec)
33025	return op.Output(0)
33026}
33027
33028// AnyAttr is an optional argument to Any.
33029type AnyAttr func(optionalAttr)
33030
33031// AnyKeepDims sets the optional keep_dims attribute to value.
33032//
33033// value: If true, retain reduced dimensions with length 1.
33034// If not specified, defaults to false
33035func AnyKeepDims(value bool) AnyAttr {
33036	return func(m optionalAttr) {
33037		m["keep_dims"] = value
33038	}
33039}
33040
33041// Computes the "logical or" of elements across dimensions of a tensor.
33042//
33043// Reduces `input` along the dimensions given in `axis`. Unless
33044// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
33045// `axis`. If `keep_dims` is true, the reduced dimensions are
33046// retained with length 1.
33047//
33048// Arguments:
33049//	input: The tensor to reduce.
33050//	axis: The dimensions to reduce. Must be in the range
33051// `[-rank(input), rank(input))`.
33052//
33053// Returns The reduced tensor.
33054func Any(scope *Scope, input tf.Output, axis tf.Output, optional ...AnyAttr) (output tf.Output) {
33055	if scope.Err() != nil {
33056		return
33057	}
33058	attrs := map[string]interface{}{}
33059	for _, a := range optional {
33060		a(attrs)
33061	}
33062	opspec := tf.OpSpec{
33063		Type: "Any",
33064		Input: []tf.Input{
33065			input, axis,
33066		},
33067		Attrs: attrs,
33068	}
33069	op := scope.AddOperation(opspec)
33070	return op.Output(0)
33071}
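
// anySketch is a hedged usage sketch: reduce a boolean matrix along axis 1,
// keeping the reduced dimension so the result stays rank-2.
func anySketch(s *Scope) tf.Output {
	m := Const(s, [][]bool{{true, false}, {false, false}})
	axis := Const(s, int32(1))
	return Any(s, m, axis, AnyKeepDims(true)) // => [[true] [false]]
}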
33072
33073// DestroyResourceOpAttr is an optional argument to DestroyResourceOp.
33074type DestroyResourceOpAttr func(optionalAttr)
33075
33076// DestroyResourceOpIgnoreLookupError sets the optional ignore_lookup_error attribute to value.
33077//
33078// value: whether to ignore the error when the resource
33079// doesn't exist.
33080// If not specified, defaults to true
33081func DestroyResourceOpIgnoreLookupError(value bool) DestroyResourceOpAttr {
33082	return func(m optionalAttr) {
33083		m["ignore_lookup_error"] = value
33084	}
33085}
33086
33087// Deletes the resource specified by the handle.
33088//
33089// All subsequent operations using the resource will result in a NotFound
33090// error status.
33091//
33092// Arguments:
33093//	resource: handle to the resource to delete.
33094//
33095// Returns the created operation.
33096func DestroyResourceOp(scope *Scope, resource tf.Output, optional ...DestroyResourceOpAttr) (o *tf.Operation) {
33097	if scope.Err() != nil {
33098		return
33099	}
33100	attrs := map[string]interface{}{}
33101	for _, a := range optional {
33102		a(attrs)
33103	}
33104	opspec := tf.OpSpec{
33105		Type: "DestroyResourceOp",
33106		Input: []tf.Input{
33107			resource,
33108		},
33109		Attrs: attrs,
33110	}
33111	return scope.AddOperation(opspec)
33112}
33113
33114// Generates values in an interval.
33115//
// A sequence of `num` evenly-spaced values is generated beginning at `start`.
// If `num > 1`, consecutive values in the sequence differ by
// `(stop - start) / (num - 1)`, so that the last one is exactly `stop`.
33119//
33120// For example:
33121//
33122// ```
33123// tf.linspace(10.0, 12.0, 3, name="linspace") => [ 10.0  11.0  12.0]
33124// ```
33125//
33126// Arguments:
33127//	start: 0-D tensor. First entry in the range.
33128//	stop: 0-D tensor. Last entry in the range.
33129//	num: 0-D tensor. Number of values to generate.
33130//
33131// Returns 1-D. The generated values.
33132func LinSpace(scope *Scope, start tf.Output, stop tf.Output, num tf.Output) (output tf.Output) {
33133	if scope.Err() != nil {
33134		return
33135	}
33136	opspec := tf.OpSpec{
33137		Type: "LinSpace",
33138		Input: []tf.Input{
33139			start, stop, num,
33140		},
33141	}
33142	op := scope.AddOperation(opspec)
33143	return op.Output(0)
33144}
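
// linSpaceSketch is a hedged Go rendering of the example above: start and stop
// are float scalars and num is an integer scalar.
func linSpaceSketch(s *Scope) tf.Output {
	start := Const(s, float32(10.0))
	stop := Const(s, float32(12.0))
	num := Const(s, int32(3))
	return LinSpace(s, start, stop, num) // => [10 11 12]
}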
33145
33146// ComplexAttr is an optional argument to Complex.
33147type ComplexAttr func(optionalAttr)
33148
33149// ComplexTout sets the optional Tout attribute to value.
33150// If not specified, defaults to DT_COMPLEX64
33151func ComplexTout(value tf.DataType) ComplexAttr {
33152	return func(m optionalAttr) {
33153		m["Tout"] = value
33154	}
33155}
33156
33157// Converts two real numbers to a complex number.
33158//
33159// Given a tensor `real` representing the real part of a complex number, and a
33160// tensor `imag` representing the imaginary part of a complex number, this
33161// operation returns complex numbers elementwise of the form \\(a + bj\\), where
33162// *a* represents the `real` part and *b* represents the `imag` part.
33163//
33164// The input tensors `real` and `imag` must have the same shape.
33165//
33166// For example:
33167//
33168// ```
33169// # tensor 'real' is [2.25, 3.25]
33170// # tensor `imag` is [4.75, 5.75]
// tf.complex(real, imag) ==> [2.25 + 4.75j, 3.25 + 5.75j]
33172// ```
33173func Complex(scope *Scope, real tf.Output, imag tf.Output, optional ...ComplexAttr) (out tf.Output) {
33174	if scope.Err() != nil {
33175		return
33176	}
33177	attrs := map[string]interface{}{}
33178	for _, a := range optional {
33179		a(attrs)
33180	}
33181	opspec := tf.OpSpec{
33182		Type: "Complex",
33183		Input: []tf.Input{
33184			real, imag,
33185		},
33186		Attrs: attrs,
33187	}
33188	op := scope.AddOperation(opspec)
33189	return op.Output(0)
33190}
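
// complexImagSketch is a hedged sketch pairing Complex with Imag (defined just
// below): it builds complex64 values from two float32 tensors, then recovers
// the imaginary parts.
func complexImagSketch(s *Scope) (tf.Output, tf.Output) {
	re := Const(s, []float32{2.25, 3.25})
	im := Const(s, []float32{4.75, 5.75})
	c := Complex(s, re, im) // Tout defaults to DT_COMPLEX64
	return c, Imag(s, c)    // Imag(c) => [4.75 5.75]
}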
33191
33192// ImagAttr is an optional argument to Imag.
33193type ImagAttr func(optionalAttr)
33194
33195// ImagTout sets the optional Tout attribute to value.
33196// If not specified, defaults to DT_FLOAT
33197func ImagTout(value tf.DataType) ImagAttr {
33198	return func(m optionalAttr) {
33199		m["Tout"] = value
33200	}
33201}
33202
33203// Returns the imaginary part of a complex number.
33204//
33205// Given a tensor `input` of complex numbers, this operation returns a tensor of
33206// type `float` that is the imaginary part of each element in `input`. All
33207// elements in `input` must be complex numbers of the form \\(a + bj\\), where *a*
33208// is the real part and *b* is the imaginary part returned by this operation.
33209//
33210// For example:
33211//
33212// ```
33213// # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
33214// tf.imag(input) ==> [4.75, 5.75]
33215// ```
33216func Imag(scope *Scope, input tf.Output, optional ...ImagAttr) (output tf.Output) {
33217	if scope.Err() != nil {
33218		return
33219	}
33220	attrs := map[string]interface{}{}
33221	for _, a := range optional {
33222		a(attrs)
33223	}
33224	opspec := tf.OpSpec{
33225		Type: "Imag",
33226		Input: []tf.Input{
33227			input,
33228		},
33229		Attrs: attrs,
33230	}
33231	op := scope.AddOperation(opspec)
33232	return op.Output(0)
33233}
33234
33235// Computes hyperbolic tangent of `x` element-wise.
33236func Tanh(scope *Scope, x tf.Output) (y tf.Output) {
33237	if scope.Err() != nil {
33238		return
33239	}
33240	opspec := tf.OpSpec{
33241		Type: "Tanh",
33242		Input: []tf.Input{
33243			x,
33244		},
33245	}
33246	op := scope.AddOperation(opspec)
33247	return op.Output(0)
33248}
33249
33250// Computes the maximum along segments of a tensor.
33251//
33252// Read
33253// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)
33254// for an explanation of segments.
33255//
33256// Computes a tensor such that
33257// \\(output_i = \max_j(data_j)\\) where `max` is over `j` such
33258// that `segment_ids[j] == i`.
33259//
33260// If the max is empty for a given segment ID `i`, `output[i] = 0`.
33261//
33262// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
33263// <img style="width:100%" src="https://www.tensorflow.org/images/SegmentMax.png" alt>
33264// </div>
33265//
33266// For example:
33267//
33268// ```
33269// c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]])
33270// tf.segment_max(c, tf.constant([0, 0, 1]))
33271// # ==> [[4, 3, 3, 4],
33272// #      [5, 6, 7, 8]]
33273// ```
33274//
33275//
33276// Arguments:
33277//
33278//	segment_ids: A 1-D tensor whose size is equal to the size of `data`'s
33279// first dimension.  Values should be sorted and can be repeated.
33280//
33281// Returns Has same shape as data, except for dimension 0 which
33282// has size `k`, the number of segments.
33283func SegmentMax(scope *Scope, data tf.Output, segment_ids tf.Output) (output tf.Output) {
33284	if scope.Err() != nil {
33285		return
33286	}
33287	opspec := tf.OpSpec{
33288		Type: "SegmentMax",
33289		Input: []tf.Input{
33290			data, segment_ids,
33291		},
33292	}
33293	op := scope.AddOperation(opspec)
33294	return op.Output(0)
33295}
33296
33297// Creates a dataset that skips `count` elements from the `input_dataset`.
33298//
33299// Arguments:
33300//
33301//	count: A scalar representing the number of elements from the `input_dataset`
33302// that should be skipped.  If count is -1, skips everything.
33303//
33304//
33305func SkipDataset(scope *Scope, input_dataset tf.Output, count tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
33306	if scope.Err() != nil {
33307		return
33308	}
33309	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
33310	opspec := tf.OpSpec{
33311		Type: "SkipDataset",
33312		Input: []tf.Input{
33313			input_dataset, count,
33314		},
33315		Attrs: attrs,
33316	}
33317	op := scope.AddOperation(opspec)
33318	return op.Output(0)
33319}
33320
33321// VarHandleOpAttr is an optional argument to VarHandleOp.
33322type VarHandleOpAttr func(optionalAttr)
33323
33324// VarHandleOpContainer sets the optional container attribute to value.
33325//
33326// value: the container this variable is placed in.
33327// If not specified, defaults to ""
33328func VarHandleOpContainer(value string) VarHandleOpAttr {
33329	return func(m optionalAttr) {
33330		m["container"] = value
33331	}
33332}
33333
33334// VarHandleOpSharedName sets the optional shared_name attribute to value.
33335//
33336// value: the name by which this variable is referred to.
33337// If not specified, defaults to ""
33338func VarHandleOpSharedName(value string) VarHandleOpAttr {
33339	return func(m optionalAttr) {
33340		m["shared_name"] = value
33341	}
33342}
33343
33344// Creates a handle to a Variable resource.
33345//
33346// Arguments:
33347//	dtype: the type of this variable. Must agree with the dtypes
33348// of all ops using this variable.
33349//	shape: The (possibly partially specified) shape of this variable.
33350func VarHandleOp(scope *Scope, dtype tf.DataType, shape tf.Shape, optional ...VarHandleOpAttr) (resource tf.Output) {
33351	if scope.Err() != nil {
33352		return
33353	}
33354	attrs := map[string]interface{}{"dtype": dtype, "shape": shape}
33355	for _, a := range optional {
33356		a(attrs)
33357	}
33358	opspec := tf.OpSpec{
33359		Type: "VarHandleOp",
33360
33361		Attrs: attrs,
33362	}
33363	op := scope.AddOperation(opspec)
33364	return op.Output(0)
33365}
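
// varHandleOpSketch is a hedged sketch: create a variable handle for a 10x10
// float matrix. The shared_name value is hypothetical; a non-empty shared_name
// lets separate sessions refer to the same underlying resource.
func varHandleOpSketch(s *Scope) tf.Output {
	return VarHandleOp(s, tf.Float, tf.MakeShape(10, 10),
		VarHandleOpSharedName("weights")) // hypothetical name
}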
33366
33367// AngleAttr is an optional argument to Angle.
33368type AngleAttr func(optionalAttr)
33369
33370// AngleTout sets the optional Tout attribute to value.
33371// If not specified, defaults to DT_FLOAT
33372func AngleTout(value tf.DataType) AngleAttr {
33373	return func(m optionalAttr) {
33374		m["Tout"] = value
33375	}
33376}
33377
33378// Returns the argument of a complex number.
33379//
33380// Given a tensor `input` of complex numbers, this operation returns a tensor of
33381// type `float` that is the argument of each element in `input`. All elements in
33382// `input` must be complex numbers of the form \\(a + bj\\), where *a*
33383// is the real part and *b* is the imaginary part.
33384//
33385// The argument returned by this operation is of the form \\(atan2(b, a)\\).
33386//
33387// For example:
33388//
33389// ```
33390// # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
33391// tf.angle(input) ==> [2.0132, 1.056]
33392// ```
33393//
33394// @compatibility(numpy)
33395// Equivalent to np.angle.
33396// @end_compatibility
33397func Angle(scope *Scope, input tf.Output, optional ...AngleAttr) (output tf.Output) {
33398	if scope.Err() != nil {
33399		return
33400	}
33401	attrs := map[string]interface{}{}
33402	for _, a := range optional {
33403		a(attrs)
33404	}
33405	opspec := tf.OpSpec{
33406		Type: "Angle",
33407		Input: []tf.Input{
33408			input,
33409		},
33410		Attrs: attrs,
33411	}
33412	op := scope.AddOperation(opspec)
33413	return op.Output(0)
33414}
33415
33416// Clips tensor values to a specified min and max.
33417//
33418// Given a tensor `t`, this operation returns a tensor of the same type and
33419// shape as `t` with its values clipped to `clip_value_min` and `clip_value_max`.
33420// Any values less than `clip_value_min` are set to `clip_value_min`. Any values
33421// greater than `clip_value_max` are set to `clip_value_max`.
33422//
33423// Arguments:
33424//	t: A `Tensor`.
33425//	clip_value_min: A 0-D (scalar) `Tensor`, or a `Tensor` with the same shape
33426// as `t`. The minimum value to clip by.
33427//	clip_value_max: A 0-D (scalar) `Tensor`, or a `Tensor` with the same shape
33428// as `t`. The maximum value to clip by.
33429//
33430// Returns A clipped `Tensor` with the same shape as input 't'.
33431func ClipByValue(scope *Scope, t tf.Output, clip_value_min tf.Output, clip_value_max tf.Output) (output tf.Output) {
33432	if scope.Err() != nil {
33433		return
33434	}
33435	opspec := tf.OpSpec{
33436		Type: "ClipByValue",
33437		Input: []tf.Input{
33438			t, clip_value_min, clip_value_max,
33439		},
33440	}
33441	op := scope.AddOperation(opspec)
33442	return op.Output(0)
33443}
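
// clipByValueSketch is a hedged usage sketch: clip values into [-1, 1] using
// scalar bounds, which broadcast over the input tensor.
func clipByValueSketch(s *Scope, t tf.Output) tf.Output {
	lo := Const(s, float32(-1))
	hi := Const(s, float32(1))
	return ClipByValue(s, t, lo, hi)
}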
33444
33445// Counts the number of occurrences of each value in an integer array.
33446//
33447// Outputs a vector with length `size` and the same dtype as `weights`. If
33448// `weights` are empty, then index `i` stores the number of times the value `i` is
33449// counted in `arr`. If `weights` are non-empty, then index `i` stores the sum of
33450// the value in `weights` at each index where the corresponding value in `arr` is
33451// `i`.
33452//
33453// Values in `arr` outside of the range [0, size) are ignored.
33454//
33455// Arguments:
33456//	arr: int32 `Tensor`.
33457//	size: non-negative int32 scalar `Tensor`.
//	weights: An int32, int64, float32, or float64 `Tensor` with the same
33459// shape as `arr`, or a length-0 `Tensor`, in which case it acts as all weights
33460// equal to 1.
33461//
33462// Returns 1D `Tensor` with length equal to `size`. The counts or summed weights for
33463// each value in the range [0, size).
33464func Bincount(scope *Scope, arr tf.Output, size tf.Output, weights tf.Output) (bins tf.Output) {
33465	if scope.Err() != nil {
33466		return
33467	}
33468	opspec := tf.OpSpec{
33469		Type: "Bincount",
33470		Input: []tf.Input{
33471			arr, size, weights,
33472		},
33473	}
33474	op := scope.AddOperation(opspec)
33475	return op.Output(0)
33476}
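
// bincountSketch is a hedged usage sketch: with unit weights of the same shape
// as arr, each occurrence counts as 1 (a length-0 weights tensor would behave
// the same way, per the doc above).
func bincountSketch(s *Scope) tf.Output {
	arr := Const(s, []int32{1, 1, 2, 5})
	size := Const(s, int32(6))
	weights := Const(s, []int32{1, 1, 1, 1})
	return Bincount(s, arr, size, weights) // => [0 2 1 0 0 1]
}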
33477
33478// CumsumAttr is an optional argument to Cumsum.
33479type CumsumAttr func(optionalAttr)
33480
33481// CumsumExclusive sets the optional exclusive attribute to value.
33482//
33483// value: If `True`, perform exclusive cumsum.
33484// If not specified, defaults to false
33485func CumsumExclusive(value bool) CumsumAttr {
33486	return func(m optionalAttr) {
33487		m["exclusive"] = value
33488	}
33489}
33490
33491// CumsumReverse sets the optional reverse attribute to value.
33492//
33493// value: A `bool` (default: False).
33494// If not specified, defaults to false
33495func CumsumReverse(value bool) CumsumAttr {
33496	return func(m optionalAttr) {
33497		m["reverse"] = value
33498	}
33499}
33500
33501// Compute the cumulative sum of the tensor `x` along `axis`.
33502//
33503// By default, this op performs an inclusive cumsum, which means that the first
33504// element of the input is identical to the first element of the output:
33505//
33506// ```python
33507// tf.cumsum([a, b, c])  # => [a, a + b, a + b + c]
33508// ```
33509//
33510// By setting the `exclusive` kwarg to `True`, an exclusive cumsum is
33511// performed instead:
33512//
33513// ```python
33514// tf.cumsum([a, b, c], exclusive=True)  # => [0, a, a + b]
33515// ```
33516//
33517// By setting the `reverse` kwarg to `True`, the cumsum is performed in the
33518// opposite direction:
33519//
33520// ```python
33521// tf.cumsum([a, b, c], reverse=True)  # => [a + b + c, b + c, c]
33522// ```
33523//
33524// This is more efficient than using separate `tf.reverse` ops.
33525//
33526// The `reverse` and `exclusive` kwargs can also be combined:
33527//
33528// ```python
33529// tf.cumsum([a, b, c], exclusive=True, reverse=True)  # => [b + c, c, 0]
33530// ```
33531//
33532// Arguments:
33533//	x: A `Tensor`. Must be one of the following types: `float32`, `float64`,
33534// `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
33535// `complex128`, `qint8`, `quint8`, `qint32`, `half`.
33536//	axis: A `Tensor` of type `int32` (default: 0). Must be in the range
33537// `[-rank(x), rank(x))`.
33538func Cumsum(scope *Scope, x tf.Output, axis tf.Output, optional ...CumsumAttr) (out tf.Output) {
33539	if scope.Err() != nil {
33540		return
33541	}
33542	attrs := map[string]interface{}{}
33543	for _, a := range optional {
33544		a(attrs)
33545	}
33546	opspec := tf.OpSpec{
33547		Type: "Cumsum",
33548		Input: []tf.Input{
33549			x, axis,
33550		},
33551		Attrs: attrs,
33552	}
33553	op := scope.AddOperation(opspec)
33554	return op.Output(0)
33555}
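
// cumsumSketch is a hedged Go rendering of the options documented above,
// combining exclusive and reverse on a small vector.
func cumsumSketch(s *Scope) tf.Output {
	x := Const(s, []int32{1, 2, 3})
	axis := Const(s, int32(0))
	// exclusive+reverse is [b+c, c, 0] in the notation above, so => [5 3 0]
	return Cumsum(s, x, axis, CumsumExclusive(true), CumsumReverse(true))
}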
33556
33557// Return the shape of s0 op s1 with broadcast.
33558//
33559// Given `s0` and `s1`, tensors that represent shapes, compute `r0`, the
33560// broadcasted shape. `s0`, `s1` and `r0` are all integer vectors.
33561func BroadcastArgs(scope *Scope, s0 tf.Output, s1 tf.Output) (r0 tf.Output) {
33562	if scope.Err() != nil {
33563		return
33564	}
33565	opspec := tf.OpSpec{
33566		Type: "BroadcastArgs",
33567		Input: []tf.Input{
33568			s0, s1,
33569		},
33570	}
33571	op := scope.AddOperation(opspec)
33572	return op.Output(0)
33573}
33574
33575// DataFormatDimMapAttr is an optional argument to DataFormatDimMap.
33576type DataFormatDimMapAttr func(optionalAttr)
33577
33578// DataFormatDimMapSrcFormat sets the optional src_format attribute to value.
33579//
33580// value: source data format.
33581// If not specified, defaults to "NHWC"
33582func DataFormatDimMapSrcFormat(value string) DataFormatDimMapAttr {
33583	return func(m optionalAttr) {
33584		m["src_format"] = value
33585	}
33586}
33587
33588// DataFormatDimMapDstFormat sets the optional dst_format attribute to value.
33589//
33590// value: destination data format.
33591// If not specified, defaults to "NCHW"
33592func DataFormatDimMapDstFormat(value string) DataFormatDimMapAttr {
33593	return func(m optionalAttr) {
33594		m["dst_format"] = value
33595	}
33596}
33597
// Returns the dimension index in the destination data format given the one in
// the source data format.
33601//
33602// Arguments:
33603//	x: A Tensor with each element as a dimension index in source data format.
33604// Must be in the range [-4, 4).
33605//
33606// Returns A Tensor with each element as a dimension index in destination data format.
33607func DataFormatDimMap(scope *Scope, x tf.Output, optional ...DataFormatDimMapAttr) (y tf.Output) {
33608	if scope.Err() != nil {
33609		return
33610	}
33611	attrs := map[string]interface{}{}
33612	for _, a := range optional {
33613		a(attrs)
33614	}
33615	opspec := tf.OpSpec{
33616		Type: "DataFormatDimMap",
33617		Input: []tf.Input{
33618			x,
33619		},
33620		Attrs: attrs,
33621	}
33622	op := scope.AddOperation(opspec)
33623	return op.Output(0)
33624}
33625
33626// CumprodAttr is an optional argument to Cumprod.
33627type CumprodAttr func(optionalAttr)
33628
33629// CumprodExclusive sets the optional exclusive attribute to value.
33630//
33631// value: If `True`, perform exclusive cumprod.
33632// If not specified, defaults to false
33633func CumprodExclusive(value bool) CumprodAttr {
33634	return func(m optionalAttr) {
33635		m["exclusive"] = value
33636	}
33637}
33638
33639// CumprodReverse sets the optional reverse attribute to value.
33640//
33641// value: A `bool` (default: False).
33642// If not specified, defaults to false
33643func CumprodReverse(value bool) CumprodAttr {
33644	return func(m optionalAttr) {
33645		m["reverse"] = value
33646	}
33647}
33648
33649// Compute the cumulative product of the tensor `x` along `axis`.
33650//
33651// By default, this op performs an inclusive cumprod, which means that the first
33652// element of the input is identical to the first element of the output:
33653//
33654// ```python
33655// tf.cumprod([a, b, c])  # => [a, a * b, a * b * c]
33656// ```
33657//
33658// By setting the `exclusive` kwarg to `True`, an exclusive cumprod is
33659// performed instead:
33660//
33661// ```python
33662// tf.cumprod([a, b, c], exclusive=True)  # => [1, a, a * b]
33663// ```
33664//
33665// By setting the `reverse` kwarg to `True`, the cumprod is performed in the
33666// opposite direction:
33667//
33668// ```python
33669// tf.cumprod([a, b, c], reverse=True)  # => [a * b * c, b * c, c]
33670// ```
33671//
33672// This is more efficient than using separate `tf.reverse` ops.
33673//
33674// The `reverse` and `exclusive` kwargs can also be combined:
33675//
33676// ```python
33677// tf.cumprod([a, b, c], exclusive=True, reverse=True)  # => [b * c, c, 1]
33678// ```
33679//
33680// Arguments:
33681//	x: A `Tensor`. Must be one of the following types: `float32`, `float64`,
33682// `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
33683// `complex128`, `qint8`, `quint8`, `qint32`, `half`.
33684//	axis: A `Tensor` of type `int32` (default: 0). Must be in the range
33685// `[-rank(x), rank(x))`.
33686func Cumprod(scope *Scope, x tf.Output, axis tf.Output, optional ...CumprodAttr) (out tf.Output) {
33687	if scope.Err() != nil {
33688		return
33689	}
33690	attrs := map[string]interface{}{}
33691	for _, a := range optional {
33692		a(attrs)
33693	}
33694	opspec := tf.OpSpec{
33695		Type: "Cumprod",
33696		Input: []tf.Input{
33697			x, axis,
33698		},
33699		Attrs: attrs,
33700	}
33701	op := scope.AddOperation(opspec)
33702	return op.Output(0)
33703}
33704
33705// RetrieveTPUEmbeddingStochasticGradientDescentParametersAttr is an optional argument to RetrieveTPUEmbeddingStochasticGradientDescentParameters.
33706type RetrieveTPUEmbeddingStochasticGradientDescentParametersAttr func(optionalAttr)
33707
33708// RetrieveTPUEmbeddingStochasticGradientDescentParametersTableId sets the optional table_id attribute to value.
33709// If not specified, defaults to -1
33710//
33711// REQUIRES: value >= -1
33712func RetrieveTPUEmbeddingStochasticGradientDescentParametersTableId(value int64) RetrieveTPUEmbeddingStochasticGradientDescentParametersAttr {
33713	return func(m optionalAttr) {
33714		m["table_id"] = value
33715	}
33716}
33717
33718// RetrieveTPUEmbeddingStochasticGradientDescentParametersTableName sets the optional table_name attribute to value.
33719// If not specified, defaults to ""
33720func RetrieveTPUEmbeddingStochasticGradientDescentParametersTableName(value string) RetrieveTPUEmbeddingStochasticGradientDescentParametersAttr {
33721	return func(m optionalAttr) {
33722		m["table_name"] = value
33723	}
33724}
33725
33726// Retrieve SGD embedding parameters.
33727//
33728// An op that retrieves optimization parameters from embedding to host
33729// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
33730// the correct embedding table configuration. For example, this op is
33731// used to retrieve updated parameters before saving a checkpoint.
33732//
// Returns the embedding parameters, updated by the stochastic gradient descent optimization algorithm.
33734func RetrieveTPUEmbeddingStochasticGradientDescentParameters(scope *Scope, num_shards int64, shard_id int64, optional ...RetrieveTPUEmbeddingStochasticGradientDescentParametersAttr) (parameters tf.Output) {
33735	if scope.Err() != nil {
33736		return
33737	}
33738	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
33739	for _, a := range optional {
33740		a(attrs)
33741	}
33742	opspec := tf.OpSpec{
33743		Type: "RetrieveTPUEmbeddingStochasticGradientDescentParameters",
33744
33745		Attrs: attrs,
33746	}
33747	op := scope.AddOperation(opspec)
33748	return op.Output(0)
33749}
33750
33751// QuantizedMulAttr is an optional argument to QuantizedMul.
33752type QuantizedMulAttr func(optionalAttr)
33753
33754// QuantizedMulToutput sets the optional Toutput attribute to value.
33755// If not specified, defaults to DT_QINT32
33756func QuantizedMulToutput(value tf.DataType) QuantizedMulAttr {
33757	return func(m optionalAttr) {
33758		m["Toutput"] = value
33759	}
33760}
33761
33762// Returns x * y element-wise, working on quantized buffers.
33763//
33764// Arguments:
33765//
33766//
33767//	min_x: The float value that the lowest quantized `x` value represents.
33768//	max_x: The float value that the highest quantized `x` value represents.
33769//	min_y: The float value that the lowest quantized `y` value represents.
33770//	max_y: The float value that the highest quantized `y` value represents.
33771//
// Returns the float value that the lowest quantized output value represents,
// and the float value that the highest quantized output value represents.
33773//
33774// *NOTE*: `QuantizedMul` supports limited forms of broadcasting. More about
33775// broadcasting [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
33776func QuantizedMul(scope *Scope, x tf.Output, y tf.Output, min_x tf.Output, max_x tf.Output, min_y tf.Output, max_y tf.Output, optional ...QuantizedMulAttr) (z tf.Output, min_z tf.Output, max_z tf.Output) {
33777	if scope.Err() != nil {
33778		return
33779	}
33780	attrs := map[string]interface{}{}
33781	for _, a := range optional {
33782		a(attrs)
33783	}
33784	opspec := tf.OpSpec{
33785		Type: "QuantizedMul",
33786		Input: []tf.Input{
33787			x, y, min_x, max_x, min_y, max_y,
33788		},
33789		Attrs: attrs,
33790	}
33791	op := scope.AddOperation(opspec)
33792	return op.Output(0), op.Output(1), op.Output(2)
33793}
33794
33795// QuantizedAddAttr is an optional argument to QuantizedAdd.
33796type QuantizedAddAttr func(optionalAttr)
33797
33798// QuantizedAddToutput sets the optional Toutput attribute to value.
33799// If not specified, defaults to DT_QINT32
33800func QuantizedAddToutput(value tf.DataType) QuantizedAddAttr {
33801	return func(m optionalAttr) {
33802		m["Toutput"] = value
33803	}
33804}
33805
33806// Returns x + y element-wise, working on quantized buffers.
33807//
33808// Arguments:
33809//
33810//
33811//	min_x: The float value that the lowest quantized `x` value represents.
33812//	max_x: The float value that the highest quantized `x` value represents.
33813//	min_y: The float value that the lowest quantized `y` value represents.
33814//	max_y: The float value that the highest quantized `y` value represents.
33815//
// Returns the float value that the lowest quantized output value represents,
// and the float value that the highest quantized output value represents.
33817//
33818// *NOTE*: `QuantizedAdd` supports limited forms of broadcasting. More about
33819// broadcasting [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
33820func QuantizedAdd(scope *Scope, x tf.Output, y tf.Output, min_x tf.Output, max_x tf.Output, min_y tf.Output, max_y tf.Output, optional ...QuantizedAddAttr) (z tf.Output, min_z tf.Output, max_z tf.Output) {
33821	if scope.Err() != nil {
33822		return
33823	}
33824	attrs := map[string]interface{}{}
33825	for _, a := range optional {
33826		a(attrs)
33827	}
33828	opspec := tf.OpSpec{
33829		Type: "QuantizedAdd",
33830		Input: []tf.Input{
33831			x, y, min_x, max_x, min_y, max_y,
33832		},
33833		Attrs: attrs,
33834	}
33835	op := scope.AddOperation(opspec)
33836	return op.Output(0), op.Output(1), op.Output(2)
33837}
33838
33839// Scatters tensor at indices in an input list.
33840//
33841// Each member of the TensorList corresponds to one row of the input tensor,
33842// specified by the given index (see `tf.gather`).
33843//
33844// input_handle: The list to scatter into.
33845// tensor: The input tensor.
33846// indices: The indices used to index into the list.
33847// output_handle: The TensorList.
33848func TensorListScatterIntoExistingList(scope *Scope, input_handle tf.Output, tensor tf.Output, indices tf.Output) (output_handle tf.Output) {
33849	if scope.Err() != nil {
33850		return
33851	}
33852	opspec := tf.OpSpec{
33853		Type: "TensorListScatterIntoExistingList",
33854		Input: []tf.Input{
33855			input_handle, tensor, indices,
33856		},
33857	}
33858	op := scope.AddOperation(opspec)
33859	return op.Output(0)
33860}
33861
33862// Computes a range that covers the actual values present in a quantized tensor.
33863//
33864// Given a quantized tensor described by `(input, input_min, input_max)`, outputs a
33865// range that covers the actual values present in that tensor. This op is typically
33866// used to produce the `requested_output_min` and `requested_output_max` for
33867// `Requantize`.
33868//
33869// Arguments:
33870//
33871//	input_min: The float value that the minimum quantized input value represents.
33872//	input_max: The float value that the maximum quantized input value represents.
33873//
// Returns the computed min output and the computed max output.
33875func RequantizationRange(scope *Scope, input tf.Output, input_min tf.Output, input_max tf.Output) (output_min tf.Output, output_max tf.Output) {
33876	if scope.Err() != nil {
33877		return
33878	}
33879	opspec := tf.OpSpec{
33880		Type: "RequantizationRange",
33881		Input: []tf.Input{
33882			input, input_min, input_max,
33883		},
33884	}
33885	op := scope.AddOperation(opspec)
33886	return op.Output(0), op.Output(1)
33887}
33888
33889// Rolls the elements of a tensor along an axis.
33890//
33891// The elements are shifted positively (towards larger indices) by the offset of
33892// `shift` along the dimension of `axis`. Negative `shift` values will shift
// elements in the opposite direction. Elements that roll past the last position
33894// will wrap around to the first and vice versa. Multiple shifts along multiple
33895// axes may be specified.
33896//
33897// For example:
33898//
33899// ```
33900// # 't' is [0, 1, 2, 3, 4]
33901// roll(t, shift=2, axis=0) ==> [3, 4, 0, 1, 2]
33902//
33903// # shifting along multiple dimensions
33904// # 't' is [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]
33905// roll(t, shift=[1, -2], axis=[0, 1]) ==> [[7, 8, 9, 5, 6], [2, 3, 4, 0, 1]]
33906//
33907// # shifting along the same axis multiple times
33908// # 't' is [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]
33909// roll(t, shift=[2, -3], axis=[1, 1]) ==> [[1, 2, 3, 4, 0], [6, 7, 8, 9, 5]]
33910// ```
33911//
33912// Arguments:
33913//
33914//	shift: Dimension must be 0-D or 1-D. `shift[i]` specifies the number of places by which
33915// elements are shifted positively (towards larger indices) along the dimension
33916// specified by `axis[i]`. Negative shifts will roll the elements in the opposite
33917// direction.
//	axis: Dimension must be 0-D or 1-D. `axis[i]` specifies the dimension in which
// the shift `shift[i]` should occur. If the same axis is referenced more than once, the
33920// total shift for that axis will be the sum of all the shifts that belong to that
33921// axis.
33922//
33923// Returns Has the same shape and size as the input. The elements are shifted
33924// positively (towards larger indices) by the offsets of `shift` along the
33925// dimensions of `axis`.
33926func Roll(scope *Scope, input tf.Output, shift tf.Output, axis tf.Output) (output tf.Output) {
33927	if scope.Err() != nil {
33928		return
33929	}
33930	opspec := tf.OpSpec{
33931		Type: "Roll",
33932		Input: []tf.Input{
33933			input, shift, axis,
33934		},
33935	}
33936	op := scope.AddOperation(opspec)
33937	return op.Output(0)
33938}
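
// rollSketch is a hedged Go rendering of the first example above: a positive
// shift of 2 along axis 0 of a 5-element vector.
func rollSketch(s *Scope) tf.Output {
	t := Const(s, []int32{0, 1, 2, 3, 4})
	shift := Const(s, int32(2))
	axis := Const(s, int32(0))
	return Roll(s, t, shift, axis) // => [3 4 0 1 2]
}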
33939
// Updates the table to associate keys with values.
33941//
33942// The tensor `keys` must be of the same type as the keys of the table.
33943// The tensor `values` must be of the type of the table values.
33944//
33945// Arguments:
33946//	table_handle: Handle to the table.
33947//	keys: Any shape.  Keys to look up.
33948//	values: Values to associate with keys.
33949//
33950// Returns the created operation.
33951func LookupTableInsertV2(scope *Scope, table_handle tf.Output, keys tf.Output, values tf.Output) (o *tf.Operation) {
33952	if scope.Err() != nil {
33953		return
33954	}
33955	opspec := tf.OpSpec{
33956		Type: "LookupTableInsertV2",
33957		Input: []tf.Input{
33958			table_handle, keys, values,
33959		},
33960	}
33961	return scope.AddOperation(opspec)
33962}
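
// lookupTableSketch is a hedged sketch: insert a batch of key/value pairs and
// read back the table size (LookupTableSizeV2 is defined later in this file).
// The table handle is assumed to come from a hash-table constructor such as
// MutableHashTableV2 with string keys and int64 values.
func lookupTableSketch(s *Scope, table tf.Output) (*tf.Operation, tf.Output) {
	keys := Const(s, []string{"a", "b"})
	vals := Const(s, []int64{1, 2})
	insert := LookupTableInsertV2(s, table, keys, vals)
	return insert, LookupTableSizeV2(s, table)
}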
33963
33964// Creates a `Dataset` that includes only 1/`num_shards` of this dataset.
33965//
33966// Arguments:
33967//
33968//	num_shards: An integer representing the number of shards operating in parallel.
33969//	index: An integer representing the current worker index.
33970//
33971//
33972func ShardDataset(scope *Scope, input_dataset tf.Output, num_shards tf.Output, index tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
33973	if scope.Err() != nil {
33974		return
33975	}
33976	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
33977	opspec := tf.OpSpec{
33978		Type: "ShardDataset",
33979		Input: []tf.Input{
33980			input_dataset, num_shards, index,
33981		},
33982		Attrs: attrs,
33983	}
33984	op := scope.AddOperation(opspec)
33985	return op.Output(0)
33986}
33987
// Creates a dataset that batches and pads `batch_size` elements from the input.
//
// Arguments:
//
//	batch_size: A scalar representing the number of elements to accumulate in a
// batch.
//	padded_shapes: A list of int64 tensors representing the desired padded shapes
// of the corresponding output components. These shapes may be partially
// specified, using `-1` to indicate that a particular dimension should be
// padded to the maximum size of all batch elements.
//	padding_values: A list of scalars containing the padding value to use for
// each of the outputs.
//	drop_remainder: A scalar representing whether the last batch should be dropped in case its size
// is smaller than desired.
//
func PaddedBatchDatasetV2(scope *Scope, input_dataset tf.Output, batch_size tf.Output, padded_shapes []tf.Output, padding_values []tf.Output, drop_remainder tf.Output, output_shapes []tf.Shape) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"output_shapes": output_shapes}
	opspec := tf.OpSpec{
		Type: "PaddedBatchDatasetV2",
		Input: []tf.Input{
			input_dataset, batch_size, tf.OutputList(padded_shapes), tf.OutputList(padding_values), drop_remainder,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Returns element-wise smallest integer not less than x.
func Ceil(scope *Scope, x tf.Output) (y tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Ceil",
		Input: []tf.Input{
			x,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Computes the number of elements in the given table.
//
// Arguments:
//	table_handle: Handle to the table.
//
// Returns Scalar that contains number of elements in the table.
func LookupTableSizeV2(scope *Scope, table_handle tf.Output) (size tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "LookupTableSizeV2",
		Input: []tf.Input{
			table_handle,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// ResizeBilinearGradAttr is an optional argument to ResizeBilinearGrad.
type ResizeBilinearGradAttr func(optionalAttr)

// ResizeBilinearGradAlignCorners sets the optional align_corners attribute to value.
//
// value: If true, the centers of the 4 corner pixels of the input and grad tensors are
// aligned. Defaults to false.
// If not specified, defaults to false
func ResizeBilinearGradAlignCorners(value bool) ResizeBilinearGradAttr {
	return func(m optionalAttr) {
		m["align_corners"] = value
	}
}

// ResizeBilinearGradHalfPixelCenters sets the optional half_pixel_centers attribute to value.
// If not specified, defaults to false
func ResizeBilinearGradHalfPixelCenters(value bool) ResizeBilinearGradAttr {
	return func(m optionalAttr) {
		m["half_pixel_centers"] = value
	}
}

// Computes the gradient of bilinear interpolation.
//
// Arguments:
//	grads: 4-D with shape `[batch, height, width, channels]`.
//	original_image: 4-D with shape `[batch, orig_height, orig_width, channels]`,
// The image tensor that was resized.
//
// Returns 4-D with shape `[batch, orig_height, orig_width, channels]`.
// Gradients with respect to the input image. Input image must have been
// float or double.
func ResizeBilinearGrad(scope *Scope, grads tf.Output, original_image tf.Output, optional ...ResizeBilinearGradAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ResizeBilinearGrad",
		Input: []tf.Input{
			grads, original_image,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Outputs all keys and values in the table.
//
// Arguments:
//	table_handle: Handle to the table.
//
//
//
// Returns Vector of all keys present in the table. Tensor of all values in the table, indexed in parallel with `keys`.
func LookupTableExportV2(scope *Scope, table_handle tf.Output, Tkeys tf.DataType, Tvalues tf.DataType) (keys tf.Output, values tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"Tkeys": Tkeys, "Tvalues": Tvalues}
	opspec := tf.OpSpec{
		Type: "LookupTableExportV2",
		Input: []tf.Input{
			table_handle,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1)
}

// MultiDeviceIteratorFromStringHandleAttr is an optional argument to MultiDeviceIteratorFromStringHandle.
type MultiDeviceIteratorFromStringHandleAttr func(optionalAttr)

// MultiDeviceIteratorFromStringHandleOutputTypes sets the optional output_types attribute to value.
//
// value: The type list for the return values.
// If not specified, defaults to <>
//
// REQUIRES: len(value) >= 0
func MultiDeviceIteratorFromStringHandleOutputTypes(value []tf.DataType) MultiDeviceIteratorFromStringHandleAttr {
	return func(m optionalAttr) {
		m["output_types"] = value
	}
}

// MultiDeviceIteratorFromStringHandleOutputShapes sets the optional output_shapes attribute to value.
//
// value: The list of shapes being produced.
// If not specified, defaults to <>
//
// REQUIRES: len(value) >= 0
func MultiDeviceIteratorFromStringHandleOutputShapes(value []tf.Shape) MultiDeviceIteratorFromStringHandleAttr {
	return func(m optionalAttr) {
		m["output_shapes"] = value
	}
}

// Generates a MultiDeviceIterator resource from its provided string handle.
//
// Arguments:
//	string_handle: String representing the resource.
//
// Returns A MultiDeviceIterator resource.
func MultiDeviceIteratorFromStringHandle(scope *Scope, string_handle tf.Output, optional ...MultiDeviceIteratorFromStringHandleAttr) (multi_device_iterator tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "MultiDeviceIteratorFromStringHandle",
		Input: []tf.Input{
			string_handle,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// MutableHashTableV2Attr is an optional argument to MutableHashTableV2.
type MutableHashTableV2Attr func(optionalAttr)

// MutableHashTableV2Container sets the optional container attribute to value.
//
// value: If non-empty, this table is placed in the given container.
// Otherwise, a default container is used.
// If not specified, defaults to ""
func MutableHashTableV2Container(value string) MutableHashTableV2Attr {
	return func(m optionalAttr) {
		m["container"] = value
	}
}

// MutableHashTableV2SharedName sets the optional shared_name attribute to value.
//
// value: If non-empty, this table is shared under the given name across
// multiple sessions.
// If not specified, defaults to ""
func MutableHashTableV2SharedName(value string) MutableHashTableV2Attr {
	return func(m optionalAttr) {
		m["shared_name"] = value
	}
}

// MutableHashTableV2UseNodeNameSharing sets the optional use_node_name_sharing attribute to value.
//
// value: If true and shared_name is empty, the table is shared
// using the node name.
// If not specified, defaults to false
func MutableHashTableV2UseNodeNameSharing(value bool) MutableHashTableV2Attr {
	return func(m optionalAttr) {
		m["use_node_name_sharing"] = value
	}
}

// Creates an empty hash table.
//
// This op creates a mutable hash table, specifying the type of its keys and
// values. Each value must be a scalar. Data can be inserted into the table using
// the insert operations. It does not support the initialization operation.
//
// Arguments:
//	key_dtype: Type of the table keys.
//	value_dtype: Type of the table values.
//
// Returns Handle to a table.
func MutableHashTableV2(scope *Scope, key_dtype tf.DataType, value_dtype tf.DataType, optional ...MutableHashTableV2Attr) (table_handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"key_dtype": key_dtype, "value_dtype": value_dtype}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "MutableHashTableV2",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
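
// The following is an illustrative, non-generated sketch of how the table ops
// above can be composed: it creates a mutable hash table and asks for its
// size. It assumes the handwritten NewScope, SubScope and Finalize helpers
// that ship with this package; the function name itself is hypothetical and
// not part of the generated API.
func exampleMutableHashTableSize() (tf.Output, error) {
	s := NewScope()
	// int64 keys mapping to string values; the shared_name is an assumed
	// value, used here only to show the optional attribute.
	table := MutableHashTableV2(s.SubScope("table"), tf.Int64, tf.String,
		MutableHashTableV2SharedName("example_table"))
	// LookupTableSizeV2 returns a scalar with the current element count.
	size := LookupTableSizeV2(s.SubScope("size"), table)
	if _, err := s.Finalize(); err != nil {
		return tf.Output{}, err
	}
	return size, nil
}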

// DequantizeAttr is an optional argument to Dequantize.
type DequantizeAttr func(optionalAttr)

// DequantizeMode sets the optional mode attribute to value.
// If not specified, defaults to "MIN_COMBINED"
func DequantizeMode(value string) DequantizeAttr {
	return func(m optionalAttr) {
		m["mode"] = value
	}
}

// Dequantize the 'input' tensor into a float Tensor.
//
// [min_range, max_range] are scalar floats that specify the range for
// the 'input' data. The 'mode' attribute controls exactly which calculations are
// used to convert the quantized values into their float equivalents.
//
// In 'MIN_COMBINED' mode, each value of the tensor will undergo the following:
//
// ```
// if T == qint8: in[i] += (range(T) + 1) / 2.0
// out[i] = min_range + (in[i] * (max_range - min_range) / range(T))
// ```
// here `range(T) = numeric_limits<T>::max() - numeric_limits<T>::min()`
//
// *MIN_COMBINED Mode Example*
//
// If the input comes from a QuantizedRelu6, the output type is
// quint8 (range of 0-255) but the possible range of QuantizedRelu6 is
// 0-6.  The min_range and max_range values are therefore 0.0 and 6.0.
// Dequantize on quint8 will take each value, cast to float, and multiply
// by 6 / 255.
// Note that if the quantized type is qint8, the operation will additionally add
// 128 to each value prior to casting.
//
// If the mode is 'MIN_FIRST', then this approach is used:
//
// ```c++
// num_discrete_values = 1 << (# of bits in T)
// range_adjust = num_discrete_values / (num_discrete_values - 1)
// range = (range_max - range_min) * range_adjust
// range_scale = range / num_discrete_values
// const double offset_input = static_cast<double>(input) - lowest_quantized;
// result = range_min + ((input - numeric_limits<T>::min()) * range_scale)
// ```
//
// *SCALED mode Example*
//
// `SCALED` mode matches the quantization approach used in
// `QuantizeAndDequantize{V2|V3}`.
//
// If the mode is `SCALED`, we do not use the full range of the output type,
// choosing to elide the lowest possible value for symmetry (e.g., output range is
// -127 to 127, not -128 to 127 for signed 8 bit quantization), so that 0.0 maps to
// 0.
//
// We first find the range of values in our tensor. The
// range we use is always centered on 0, so we find m such that
// ```c++
//   m = max(abs(input_min), abs(input_max))
// ```
//
// Our input tensor range is then `[-m, m]`.
//
// Next, we choose our fixed-point quantization buckets, `[min_fixed, max_fixed]`.
// If T is signed, this is
// ```
//   num_bits = sizeof(T) * 8
//   [min_fixed, max_fixed] =
//       [-((1 << (num_bits - 1)) - 1), (1 << (num_bits - 1)) - 1]
// ```
//
// Otherwise, if T is unsigned, the fixed-point range is
// ```
//   [min_fixed, max_fixed] = [0, (1 << num_bits) - 1]
// ```
//
// From this we compute our scaling factor, s:
// ```c++
//   s = (2 * m) / (max_fixed - min_fixed)
// ```
//
// Now we can dequantize the elements of our tensor:
// ```c++
// result = input * s
// ```
//
// Arguments:
//
//	min_range: The minimum scalar value possibly produced for the input.
//	max_range: The maximum scalar value possibly produced for the input.
func Dequantize(scope *Scope, input tf.Output, min_range tf.Output, max_range tf.Output, optional ...DequantizeAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "Dequantize",
		Input: []tf.Input{
			input, min_range, max_range,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
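
// The SCALED-mode arithmetic described above can be mirrored in plain Go.
// This is a hypothetical illustration only, not part of the generated API:
// it computes the scaling factor s for a signed type of the given bit width
// from an observed input range.
func scaledDequantizeScale(inputMin, inputMax float64, numBits uint) float64 {
	abs := func(x float64) float64 {
		if x < 0 {
			return -x
		}
		return x
	}
	// The range is centered on 0: m = max(abs(input_min), abs(input_max)).
	m := abs(inputMin)
	if a := abs(inputMax); a > m {
		m = a
	}
	// Signed fixed-point buckets [-((1 << (num_bits-1)) - 1), (1 << (num_bits-1)) - 1],
	// i.e. [-127, 127] for 8 bits, so that 0.0 maps to 0.
	maxFixed := float64(int64(1)<<(numBits-1) - 1)
	minFixed := -maxFixed
	// s = (2 * m) / (max_fixed - min_fixed); a dequantized value is input * s.
	return (2 * m) / (maxFixed - minFixed)
}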

// Flips all bits elementwise.
//
// The result will have exactly those bits set that are not set in `x`. The
// computation is performed on the underlying representation of x.
func Invert(scope *Scope, x tf.Output) (y tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Invert",
		Input: []tf.Input{
			x,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Deserialize bucket boundaries and ready flag into current QuantileAccumulator.
//
// An op that deserializes bucket boundaries and the boundaries-ready flag into the current QuantileAccumulator.
//
// Arguments:
//	quantile_stream_resource_handle: resource handle referring to a QuantileStreamResource.
//	bucket_boundaries: float; List of Rank 1 Tensors each containing the bucket boundaries for a feature.
//
// Returns the created operation.
func BoostedTreesQuantileStreamResourceDeserialize(scope *Scope, quantile_stream_resource_handle tf.Output, bucket_boundaries []tf.Output) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "BoostedTreesQuantileStreamResourceDeserialize",
		Input: []tf.Input{
			quantile_stream_resource_handle, tf.OutputList(bucket_boundaries),
		},
	}
	return scope.AddOperation(opspec)
}

// Inverse 3D fast Fourier transform.
//
// Computes the inverse 3-dimensional discrete Fourier transform over the
// inner-most 3 dimensions of `input`.
//
// Arguments:
//	input: A complex64 tensor.
//
// Returns A complex64 tensor of the same shape as `input`. The inner-most 3
//   dimensions of `input` are replaced with their inverse 3D Fourier transform.
//
// @compatibility(numpy)
// Equivalent to np.fft.ifftn with 3 dimensions.
// @end_compatibility
func IFFT3D(scope *Scope, input tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "IFFT3D",
		Input: []tf.Input{
			input,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Shuts down a running distributed TPU system.
//
// The op returns an error if no system is running.
//
// Returns the created operation.
func ShutdownDistributedTPU(scope *Scope) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "ShutdownDistributedTPU",
	}
	return scope.AddOperation(opspec)
}

// Deprecated. Disallowed in GraphDef version >= 2.
//
// DEPRECATED at GraphDef version 2: Use AdjustContrastv2 instead
func AdjustContrast(scope *Scope, images tf.Output, contrast_factor tf.Output, min_value tf.Output, max_value tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "AdjustContrast",
		Input: []tf.Input{
			images, contrast_factor, min_value, max_value,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Table initializer that takes two tensors for keys and values respectively.
//
// Arguments:
//	table_handle: Handle to a table which will be initialized.
//	keys: Keys of type Tkey.
//	values: Values of type Tval.
//
// Returns the created operation.
func InitializeTableV2(scope *Scope, table_handle tf.Output, keys tf.Output, values tf.Output) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "InitializeTableV2",
		Input: []tf.Input{
			table_handle, keys, values,
		},
	}
	return scope.AddOperation(opspec)
}

// PrintAttr is an optional argument to Print.
type PrintAttr func(optionalAttr)

// PrintMessage sets the optional message attribute to value.
//
// value: A string, prefix of the error message.
// If not specified, defaults to ""
func PrintMessage(value string) PrintAttr {
	return func(m optionalAttr) {
		m["message"] = value
	}
}

// PrintFirstN sets the optional first_n attribute to value.
//
// value: Only log `first_n` number of times. -1 disables logging.
// If not specified, defaults to -1
func PrintFirstN(value int64) PrintAttr {
	return func(m optionalAttr) {
		m["first_n"] = value
	}
}

// PrintSummarize sets the optional summarize attribute to value.
//
// value: Only print this many entries of each tensor.
// If not specified, defaults to 3
func PrintSummarize(value int64) PrintAttr {
	return func(m optionalAttr) {
		m["summarize"] = value
	}
}

// Prints a list of tensors.
//
// Passes `input` through to `output` and prints `data` when evaluating.
//
// Arguments:
//	input: The tensor passed to `output`
//	data: A list of tensors to print out when op is evaluated.
//
// Returns The unmodified `input` tensor.
func Print(scope *Scope, input tf.Output, data []tf.Output, optional ...PrintAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "Print",
		Input: []tf.Input{
			input, tf.OutputList(data),
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
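
// A hypothetical usage sketch for Print: it passes a constant through while
// logging it, using the optional attributes defined above. NewScope and Const
// are the handwritten helpers from this package; the function name is
// illustrative and not part of the generated API.
func examplePrintSketch() (tf.Output, error) {
	s := NewScope()
	x := Const(s, []float32{1, 2, 3})
	// Log `x` at most twice, prefixing each line with "x =".
	out := Print(s, x, []tf.Output{x},
		PrintMessage("x ="), PrintFirstN(2), PrintSummarize(3))
	if _, err := s.Finalize(); err != nil {
		return tf.Output{}, err
	}
	return out, nil
}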

// Outputs a `Summary` protocol buffer with a tensor and per-plugin data.
//
// Arguments:
//	tag: A string attached to this summary. Used for organization in TensorBoard.
//	tensor: A tensor to serialize.
//	serialized_summary_metadata: A serialized SummaryMetadata proto. Contains plugin
// data.
func TensorSummaryV2(scope *Scope, tag tf.Output, tensor tf.Output, serialized_summary_metadata tf.Output) (summary tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "TensorSummaryV2",
		Input: []tf.Input{
			tag, tensor, serialized_summary_metadata,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Creates a dataset that asynchronously prefetches elements from `input_dataset`.
//
// Arguments:
//
//	buffer_size: The maximum number of elements to buffer in an iterator over
// this dataset.
//
//
func PrefetchDataset(scope *Scope, input_dataset tf.Output, buffer_size tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
	opspec := tf.OpSpec{
		Type: "PrefetchDataset",
		Input: []tf.Input{
			input_dataset, buffer_size,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// TensorSummaryAttr is an optional argument to TensorSummary.
type TensorSummaryAttr func(optionalAttr)

// TensorSummaryDescription sets the optional description attribute to value.
//
// value: A json-encoded SummaryDescription proto.
// If not specified, defaults to ""
func TensorSummaryDescription(value string) TensorSummaryAttr {
	return func(m optionalAttr) {
		m["description"] = value
	}
}

// TensorSummaryLabels sets the optional labels attribute to value.
//
// value: An unused list of strings.
// If not specified, defaults to <>
func TensorSummaryLabels(value []string) TensorSummaryAttr {
	return func(m optionalAttr) {
		m["labels"] = value
	}
}

// TensorSummaryDisplayName sets the optional display_name attribute to value.
//
// value: An unused string.
// If not specified, defaults to ""
func TensorSummaryDisplayName(value string) TensorSummaryAttr {
	return func(m optionalAttr) {
		m["display_name"] = value
	}
}

// Outputs a `Summary` protocol buffer with a tensor.
//
// This op is being phased out in favor of TensorSummaryV2, which lets callers pass
// a tag as well as a serialized SummaryMetadata proto string that contains
// plugin-specific data. We will keep this op to maintain backwards compatibility.
//
// Arguments:
//	tensor: A tensor to serialize.
func TensorSummary(scope *Scope, tensor tf.Output, optional ...TensorSummaryAttr) (summary tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "TensorSummary",
		Input: []tf.Input{
			tensor,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Read an element from the TensorArray into output `value`.
//
// Arguments:
//	handle: The handle to a TensorArray.
//
//	flow_in: A float scalar that enforces proper chaining of operations.
//	dtype: The type of the elem that is returned.
//
// Returns The tensor that is read from the TensorArray.
func TensorArrayReadV3(scope *Scope, handle tf.Output, index tf.Output, flow_in tf.Output, dtype tf.DataType) (value tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"dtype": dtype}
	opspec := tf.OpSpec{
		Type: "TensorArrayReadV3",
		Input: []tf.Input{
			handle, index, flow_in,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Reduces sparse updates into the variable referenced by `resource` using the `max` operation.
//
// This operation computes
//
//     # Scalar indices
//     ref[indices, ...] = max(ref[indices, ...], updates[...])
//
//     # Vector indices (for each i)
//     ref[indices[i], ...] = max(ref[indices[i], ...], updates[i, ...])
//
//     # High rank indices (for each i, ..., j)
//     ref[indices[i, ..., j], ...] = max(ref[indices[i, ..., j], ...], updates[i, ..., j, ...])
//
// Duplicate entries are handled correctly: if multiple `indices` reference
// the same location, their contributions are combined.
//
// Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.
//
// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
// <img style="width:100%" src='https://www.tensorflow.org/images/ScatterAdd.png' alt>
// </div>
//
// Arguments:
//	resource: Should be from a `Variable` node.
//	indices: A tensor of indices into the first dimension of `ref`.
//	updates: A tensor of updated values to reduce into `ref` using the `max` operation.
//
// Returns the created operation.
func ResourceScatterMax(scope *Scope, resource tf.Output, indices tf.Output, updates tf.Output) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "ResourceScatterMax",
		Input: []tf.Input{
			resource, indices, updates,
		},
	}
	return scope.AddOperation(opspec)
}

// Computes the gradient for the tanh of `x` wrt its input.
//
// Specifically, `grad = dy * (1 - y*y)`, where `y = tanh(x)`, and `dy`
// is the corresponding input gradient.
func TanhGrad(scope *Scope, y tf.Output, dy tf.Output) (z tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "TanhGrad",
		Input: []tf.Input{
			y, dy,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
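
// The TanhGrad formula above is simple enough to state in plain Go. This
// hypothetical helper (not part of the generated API) evaluates
// grad = dy * (1 - y*y) for scalar values, where y = tanh(x).
func tanhGradValue(y, dy float64) float64 {
	return dy * (1 - y*y)
}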

// Outputs a `Summary` protocol buffer with scalar values.
//
// The input `tags` and `values` must have the same shape.  The generated summary
// has a summary value for each tag-value pair in `tags` and `values`.
//
// Arguments:
//	tags: Tags for the summary.
//	values: Same shape as `tags`.  Values for the summary.
//
// Returns Scalar.  Serialized `Summary` protocol buffer.
func ScalarSummary(scope *Scope, tags tf.Output, values tf.Output) (summary tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "ScalarSummary",
		Input: []tf.Input{
			tags, values,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// ImageSummaryAttr is an optional argument to ImageSummary.
type ImageSummaryAttr func(optionalAttr)

// ImageSummaryMaxImages sets the optional max_images attribute to value.
//
// value: Max number of batch elements to generate images for.
// If not specified, defaults to 3
//
// REQUIRES: value >= 1
func ImageSummaryMaxImages(value int64) ImageSummaryAttr {
	return func(m optionalAttr) {
		m["max_images"] = value
	}
}

// ImageSummaryBadColor sets the optional bad_color attribute to value.
//
// value: Color to use for pixels with non-finite values.
// If not specified, defaults to <dtype:DT_UINT8 tensor_shape:<dim:<size:4 > > int_val:255 int_val:0 int_val:0 int_val:255 >
func ImageSummaryBadColor(value tf.Tensor) ImageSummaryAttr {
	return func(m optionalAttr) {
		m["bad_color"] = value
	}
}

// Outputs a `Summary` protocol buffer with images.
//
// The summary has up to `max_images` summary values containing images. The
// images are built from `tensor` which must be 4-D with shape `[batch_size,
// height, width, channels]` and where `channels` can be:
//
// *  1: `tensor` is interpreted as Grayscale.
// *  3: `tensor` is interpreted as RGB.
// *  4: `tensor` is interpreted as RGBA.
//
// The images have the same number of channels as the input tensor. For float
// input, the values are normalized one image at a time to fit in the range
// `[0, 255]`.  `uint8` values are unchanged.  The op uses two different
// normalization algorithms:
//
// *  If the input values are all positive, they are rescaled so the largest one
//    is 255.
//
// *  If any input value is negative, the values are shifted so input value 0.0
//    is at 127.  They are then rescaled so that either the smallest value is 0,
//    or the largest one is 255.
//
// The `tag` argument is a scalar `Tensor` of type `string`.  It is used to
// build the `tag` of the summary values:
//
// *  If `max_images` is 1, the summary value tag is '*tag*/image'.
// *  If `max_images` is greater than 1, the summary value tags are
//    generated sequentially as '*tag*/image/0', '*tag*/image/1', etc.
//
// The `bad_color` argument is the color to use in the generated images for
// non-finite input values.  It is a `uint8` 1-D tensor of length `channels`.
// Each element must be in the range `[0, 255]` (It represents the value of a
// pixel in the output image).  Non-finite values in the input tensor are
// replaced by this tensor in the output image.  The default value is the color
// red.
//
// Arguments:
//	tag: Scalar. Used to build the `tag` attribute of the summary values.
//	tensor: 4-D of shape `[batch_size, height, width, channels]` where
// `channels` is 1, 3, or 4.
//
// Returns Scalar. Serialized `Summary` protocol buffer.
func ImageSummary(scope *Scope, tag tf.Output, tensor tf.Output, optional ...ImageSummaryAttr) (summary tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ImageSummary",
		Input: []tf.Input{
			tag, tensor,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// AudioSummaryV2Attr is an optional argument to AudioSummaryV2.
type AudioSummaryV2Attr func(optionalAttr)

// AudioSummaryV2MaxOutputs sets the optional max_outputs attribute to value.
//
// value: Max number of batch elements to generate audio for.
// If not specified, defaults to 3
//
// REQUIRES: value >= 1
func AudioSummaryV2MaxOutputs(value int64) AudioSummaryV2Attr {
	return func(m optionalAttr) {
		m["max_outputs"] = value
	}
}

// Outputs a `Summary` protocol buffer with audio.
//
// The summary has up to `max_outputs` summary values containing audio. The
// audio is built from `tensor` which must be 3-D with shape `[batch_size,
// frames, channels]` or 2-D with shape `[batch_size, frames]`. The values are
// assumed to be in the range of `[-1.0, 1.0]` with a sample rate of `sample_rate`.
//
// The `tag` argument is a scalar `Tensor` of type `string`.  It is used to
// build the `tag` of the summary values:
//
// *  If `max_outputs` is 1, the summary value tag is '*tag*/audio'.
// *  If `max_outputs` is greater than 1, the summary value tags are
//    generated sequentially as '*tag*/audio/0', '*tag*/audio/1', etc.
//
// Arguments:
//	tag: Scalar. Used to build the `tag` attribute of the summary values.
//	tensor: 2-D of shape `[batch_size, frames]`.
//	sample_rate: The sample rate of the signal in hertz.
//
// Returns Scalar. Serialized `Summary` protocol buffer.
func AudioSummaryV2(scope *Scope, tag tf.Output, tensor tf.Output, sample_rate tf.Output, optional ...AudioSummaryV2Attr) (summary tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "AudioSummaryV2",
		Input: []tf.Input{
			tag, tensor, sample_rate,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Splits a tensor into a list.
//
// list[i] corresponds to lengths[i] tensors from the input tensor.
// The tensor must have rank at least 1 and contain exactly sum(lengths) elements.
//
// tensor: The input tensor.
// element_shape: A shape compatible with that of elements in the tensor.
// lengths: Vector of sizes of the 0th dimension of tensors in the list.
// output_handle: The list.
func TensorListSplit(scope *Scope, tensor tf.Output, element_shape tf.Output, lengths tf.Output) (output_handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "TensorListSplit",
		Input: []tf.Input{
			tensor, element_shape, lengths,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// AvgPoolAttr is an optional argument to AvgPool.
type AvgPoolAttr func(optionalAttr)

// AvgPoolDataFormat sets the optional data_format attribute to value.
//
// value: Specify the data format of the input and output data. With the
// default format "NHWC", the data is stored in the order of:
//     [batch, in_height, in_width, in_channels].
// Alternatively, the format could be "NCHW", the data storage order of:
//     [batch, in_channels, in_height, in_width].
// If not specified, defaults to "NHWC"
func AvgPoolDataFormat(value string) AvgPoolAttr {
	return func(m optionalAttr) {
		m["data_format"] = value
	}
}

// Performs average pooling on the input.
//
// Each entry in `output` is the mean of the corresponding size `ksize`
// window in `value`.
//
// Arguments:
//	value: 4-D with shape `[batch, height, width, channels]`.
//	ksize: The size of the sliding window for each dimension of `value`.
//	strides: The stride of the sliding window for each dimension of `value`.
//	padding: The type of padding algorithm to use.
//
// Returns The average pooled output tensor.
func AvgPool(scope *Scope, value tf.Output, ksize []int64, strides []int64, padding string, optional ...AvgPoolAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "AvgPool",
		Input: []tf.Input{
			value,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
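
// A hypothetical AvgPool sketch (not part of the generated API): it average-
// pools a 1x4x4x1 constant with a 2x2 window and stride 2, yielding a 1x2x2x1
// result. NewScope and Const are the handwritten helpers from this package.
func exampleAvgPoolSketch() (tf.Output, error) {
	s := NewScope()
	img := Const(s, [][][][]float32{{
		{{1}, {2}, {3}, {4}},
		{{5}, {6}, {7}, {8}},
		{{9}, {10}, {11}, {12}},
		{{13}, {14}, {15}, {16}},
	}})
	// ksize and strides are given per dimension of the NHWC input.
	out := AvgPool(s, img, []int64{1, 2, 2, 1}, []int64{1, 2, 2, 1}, "VALID",
		AvgPoolDataFormat("NHWC"))
	if _, err := s.Finalize(); err != nil {
		return tf.Output{}, err
	}
	return out, nil
}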

// Merges summaries.
//
// This op creates a
// [`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)
// protocol buffer that contains the union of all the values in the input
// summaries.
//
// When the Op is run, it reports an `InvalidArgument` error if multiple values
// in the summaries to merge use the same tag.
//
// Arguments:
//	inputs: Can be of any shape.  Each must contain serialized `Summary` protocol
// buffers.
//
// Returns Scalar. Serialized `Summary` protocol buffer.
func MergeSummary(scope *Scope, inputs []tf.Output) (summary tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "MergeSummary",
		Input: []tf.Input{
			tf.OutputList(inputs),
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
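
// A hypothetical end-to-end summary sketch (not part of the generated API):
// it builds two scalar summaries with ScalarSummary and merges them with
// MergeSummary, assuming the handwritten NewScope, SubScope and Const helpers.
func exampleMergedSummaries() (tf.Output, error) {
	s := NewScope()
	loss := ScalarSummary(s.SubScope("loss"),
		Const(s.SubScope("loss_tag"), []string{"loss"}),
		Const(s.SubScope("loss_val"), []float32{0.5}))
	acc := ScalarSummary(s.SubScope("acc"),
		Const(s.SubScope("acc_tag"), []string{"accuracy"}),
		Const(s.SubScope("acc_val"), []float32{0.9}))
	// The merge fails at run time if two values share the same tag.
	merged := MergeSummary(s, []tf.Output{loss, acc})
	if _, err := s.Finalize(); err != nil {
		return tf.Output{}, err
	}
	return merged, nil
}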

// The shape of the elements of the given list, as a tensor.
//
//   input_handle: the list
//   element_shape: the shape of elements of the list
func TensorListElementShape(scope *Scope, input_handle tf.Output, shape_type tf.DataType) (element_shape tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"shape_type": shape_type}
	opspec := tf.OpSpec{
		Type: "TensorListElementShape",
		Input: []tf.Input{
			input_handle,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Returns the item in the list with the given index.
//
// input_handle: the list
// index: the position in the list from which an element will be retrieved
// item: the element at that position
//
//
func TensorListGetItem(scope *Scope, input_handle tf.Output, index tf.Output, element_shape tf.Output, element_dtype tf.DataType) (item tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"element_dtype": element_dtype}
	opspec := tf.OpSpec{
		Type: "TensorListGetItem",
		Input: []tf.Input{
			input_handle, index, element_shape,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
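
// A hypothetical tensor-list sketch (not part of the generated API): it splits
// a 3x2 constant into a list of [1, 2] and [2, 2] pieces with TensorListSplit,
// then reads the first piece back with TensorListGetItem. The element shape
// [-1, 2] is only partially specified, since the leading dimension varies.
func exampleTensorListSplitAndGet() (tf.Output, error) {
	s := NewScope()
	t := Const(s.SubScope("t"), [][]float32{{1, 2}, {3, 4}, {5, 6}})
	elemShape := Const(s.SubScope("shape"), []int64{-1, 2})
	lengths := Const(s.SubScope("lengths"), []int64{1, 2})
	list := TensorListSplit(s, t, elemShape, lengths)
	item := TensorListGetItem(s, list, Const(s.SubScope("idx"), int32(0)), elemShape, tf.Float)
	if _, err := s.Finalize(); err != nil {
		return tf.Output{}, err
	}
	return item, nil
}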

// Resizes the list.
//
//
// input_handle: the input list
// size: size of the output list
//
func TensorListResize(scope *Scope, input_handle tf.Output, size tf.Output) (output_handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "TensorListResize",
		Input: []tf.Input{
			input_handle, size,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
// Returns a diagonal tensor with the given diagonal values.
//
// Given a `diagonal`, this operation returns a tensor with the `diagonal` and
// everything else padded with zeros. The diagonal is computed as follows:
//
// Assume `diagonal` has dimensions [D1,..., Dk], then the output is a tensor of
// rank 2k with dimensions [D1,..., Dk, D1,..., Dk] where:
//
// `output[i1,..., ik, i1,..., ik] = diagonal[i1, ..., ik]` and 0 everywhere else.
//
// For example:
//
// ```
// # 'diagonal' is [1, 2, 3, 4]
// tf.diag(diagonal) ==> [[1, 0, 0, 0]
//                        [0, 2, 0, 0]
//                        [0, 0, 3, 0]
//                        [0, 0, 0, 4]]
// ```
//
// Arguments:
//	diagonal: Rank k tensor where k is at most 1.
func Diag(scope *Scope, diagonal tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Diag",
		Input: []tf.Input{
			diagonal,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// ParameterizedTruncatedNormalAttr is an optional argument to ParameterizedTruncatedNormal.
type ParameterizedTruncatedNormalAttr func(optionalAttr)

// ParameterizedTruncatedNormalSeed sets the optional seed attribute to value.
//
// value: If either `seed` or `seed2` are set to be non-zero, the random number
// generator is seeded by the given seed.  Otherwise, it is seeded by a
// random seed.
// If not specified, defaults to 0
func ParameterizedTruncatedNormalSeed(value int64) ParameterizedTruncatedNormalAttr {
	return func(m optionalAttr) {
		m["seed"] = value
	}
}

// ParameterizedTruncatedNormalSeed2 sets the optional seed2 attribute to value.
//
// value: A second seed to avoid seed collision.
// If not specified, defaults to 0
func ParameterizedTruncatedNormalSeed2(value int64) ParameterizedTruncatedNormalAttr {
	return func(m optionalAttr) {
		m["seed2"] = value
	}
}

// Outputs random values from a normal distribution. The parameters may each be a
//
// scalar which applies to the entire output, or a vector of length shape[0] which
// stores the parameters for each batch.
//
// Arguments:
//	shape: The shape of the output tensor. Batches are indexed by the 0th dimension.
//	means: The mean parameter of each batch.
//	stdevs: The standard deviation parameter of each batch. Must be greater than 0.
//	minvals: The minimum cutoff. May be -infinity.
//	maxvals: The maximum cutoff. May be +infinity, and must be more than the minval
// for each batch.
//
// Returns A matrix of shape num_batches x samples_per_batch, filled with random
// truncated normal values using the parameters for each row.
func ParameterizedTruncatedNormal(scope *Scope, shape tf.Output, means tf.Output, stdevs tf.Output, minvals tf.Output, maxvals tf.Output, optional ...ParameterizedTruncatedNormalAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ParameterizedTruncatedNormal",
		Input: []tf.Input{
			shape, means, stdevs, minvals, maxvals,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
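
// A hypothetical ParameterizedTruncatedNormal sketch (not part of the
// generated API): three batches of four samples each, with per-batch means
// and a fixed seed for reproducibility, assuming the handwritten NewScope,
// SubScope and Const helpers.
func exampleTruncatedNormalSketch() (tf.Output, error) {
	s := NewScope()
	shape := Const(s.SubScope("shape"), []int32{3, 4})
	means := Const(s.SubScope("means"), []float32{0, 1, 2})
	stdevs := Const(s.SubScope("stdevs"), []float32{1, 1, 1})
	minvals := Const(s.SubScope("minvals"), []float32{-2, -2, -2})
	maxvals := Const(s.SubScope("maxvals"), []float32{2, 2, 2})
	out := ParameterizedTruncatedNormal(s, shape, means, stdevs, minvals, maxvals,
		ParameterizedTruncatedNormalSeed(7))
	if _, err := s.Finalize(); err != nil {
		return tf.Output{}, err
	}
	return out, nil
}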

// Sets the index-th position of the list to contain the given tensor.
//
// input_handle: the list
// index: the position in the list to which the tensor will be assigned
// item: the element to be assigned to that position
// output_handle: the new list, with the element in the proper position
//
func TensorListSetItem(scope *Scope, input_handle tf.Output, index tf.Output, item tf.Output) (output_handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "TensorListSetItem",
		Input: []tf.Input{
			input_handle, index, item,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Creates a TensorList by indexing into a Tensor.
//
// Each member of the TensorList corresponds to one row of the input tensor,
// specified by the given index (see `tf.gather`).
//
// tensor: The input tensor.
// indices: The indices used to index into the list.
// element_shape: The shape of the elements in the list (can be less specific than
//   the shape of the tensor).
// output_handle: The TensorList.
func TensorListScatter(scope *Scope, tensor tf.Output, indices tf.Output, element_shape tf.Output) (output_handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "TensorListScatter",
		Input: []tf.Input{
			tensor, indices, element_shape,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Deprecated. Use TensorArrayScatterV3
//
// DEPRECATED at GraphDef version 26: Use TensorArrayScatterV3
func TensorArrayScatterV2(scope *Scope, handle tf.Output, indices tf.Output, value tf.Output, flow_in tf.Output) (flow_out tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "TensorArrayScatterV2",
		Input: []tf.Input{
			handle, indices, value, flow_in,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// AsStringAttr is an optional argument to AsString.
type AsStringAttr func(optionalAttr)

// AsStringPrecision sets the optional precision attribute to value.
//
// value: The post-decimal precision to use for floating point numbers.
// Only used if precision > -1.
// If not specified, defaults to -1
func AsStringPrecision(value int64) AsStringAttr {
	return func(m optionalAttr) {
		m["precision"] = value
	}
}

// AsStringScientific sets the optional scientific attribute to value.
//
// value: Use scientific notation for floating point numbers.
// If not specified, defaults to false
func AsStringScientific(value bool) AsStringAttr {
	return func(m optionalAttr) {
		m["scientific"] = value
	}
}

// AsStringShortest sets the optional shortest attribute to value.
//
// value: Use shortest representation (either scientific or standard) for
// floating point numbers.
// If not specified, defaults to false
func AsStringShortest(value bool) AsStringAttr {
	return func(m optionalAttr) {
		m["shortest"] = value
	}
}

// AsStringWidth sets the optional width attribute to value.
//
// value: Pad pre-decimal numbers to this width.
// Applies to both floating point and integer numbers.
// Only used if width > -1.
// If not specified, defaults to -1
func AsStringWidth(value int64) AsStringAttr {
	return func(m optionalAttr) {
		m["width"] = value
	}
}

// AsStringFill sets the optional fill attribute to value.
//
// value: The value to pad if width > -1.  If empty, pads with spaces.
// Another typical value is '0'.  String cannot be longer than 1 character.
// If not specified, defaults to ""
func AsStringFill(value string) AsStringAttr {
	return func(m optionalAttr) {
		m["fill"] = value
	}
}

// Converts each entry in the given tensor to strings.  Supports many numeric
//
// types and boolean.
func AsString(scope *Scope, input tf.Output, optional ...AsStringAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "AsString",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Returns a `RaggedTensor` containing the specified sequences of numbers.
//
//
// Returns a `RaggedTensor` `result` composed from `rt_dense_values` and
// `rt_nested_splits`, such that
// `result[i] = range(starts[i], limits[i], deltas[i])`.
//
// ```python
// >>> (rt_nested_splits, rt_dense_values) = gen_ragged_ops.ragged_range(
// ...     starts=[2, 5, 8], limits=[3, 5, 12], deltas=1)
// >>> result = ragged.from_nested_row_splits(rt_dense_values, rt_nested_splits)
// >>> print(result.eval().tolist())
// [[2],               # result[0] = range(2, 3)
//  [],                # result[1] = range(5, 5)
//  [8, 9, 10, 11]]    # result[2] = range(8, 12)
// ```
//
// The input tensors `starts`, `limits`, and `deltas` may be scalars or vectors.
// The vector inputs must all have the same size.  Scalar inputs are broadcast
// to match the size of the vector inputs.
//
// Arguments:
//	starts: The starts of each range.
//	limits: The limits of each range.
//	deltas: The deltas of each range.
//
// Returns The `row_splits` for the returned `RaggedTensor`. The `flat_values` for the returned `RaggedTensor`.
func RaggedRange(scope *Scope, starts tf.Output, limits tf.Output, deltas tf.Output) (rt_nested_splits tf.Output, rt_dense_values tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "RaggedRange",
		Input: []tf.Input{
			starts, limits, deltas,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1)
}

// Deprecated, use python implementation tf.linalg.matrix_exponential.
//
// DEPRECATED at GraphDef version 27: Use Python implementation tf.linalg.matrix_exponential instead.
func MatrixExponential(scope *Scope, input tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "MatrixExponential",
		Input: []tf.Input{
			input,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Computes the Cholesky decomposition of one or more square matrices.
//
// The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
// form square matrices.
//
// The input has to be symmetric and positive definite. Only the lower-triangular
// part of the input will be used for this operation. The upper-triangular part
// will not be read.
//
// The output is a tensor of the same shape as the input
// containing the Cholesky decompositions for all input submatrices `[..., :, :]`.
//
// **Note**: The gradient computation on GPU is faster for large matrices but
// not for large batch dimensions when the submatrices are small. In this
// case it might be faster to use the CPU.
//
// Arguments:
//	input: Shape is `[..., M, M]`.
//
// Returns Shape is `[..., M, M]`.
func Cholesky(scope *Scope, input tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Cholesky",
		Input: []tf.Input{
			input,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
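
// A hypothetical Cholesky sketch (not part of the generated API): it factors
// a small symmetric positive-definite constant, assuming the handwritten
// NewScope and Const helpers from this package.
func exampleCholeskySketch() (tf.Output, error) {
	s := NewScope()
	// A 2x2 symmetric positive-definite matrix; only its lower-triangular
	// part is read by the op.
	a := Const(s, [][]float32{{4, 2}, {2, 3}})
	l := Cholesky(s, a)
	if _, err := s.Finalize(); err != nil {
		return tf.Output{}, err
	}
	return l, nil
}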

// Writes contents to the file at input filename. Creates the file and recursively
//
// creates the directory if it does not exist.
//
// Arguments:
//	filename: scalar. The name of the file to which we write the contents.
//	contents: scalar. The content to be written to the output file.
//
// Returns the created operation.
func WriteFile(scope *Scope, filename tf.Output, contents tf.Output) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "WriteFile",
		Input: []tf.Input{
			filename, contents,
		},
	}
	return scope.AddOperation(opspec)
}

// AllAttr is an optional argument to All.
type AllAttr func(optionalAttr)

// AllKeepDims sets the optional keep_dims attribute to value.
//
// value: If true, retain reduced dimensions with length 1.
// If not specified, defaults to false
func AllKeepDims(value bool) AllAttr {
	return func(m optionalAttr) {
		m["keep_dims"] = value
	}
}

// Computes the "logical and" of elements across dimensions of a tensor.
//
// Reduces `input` along the dimensions given in `axis`. Unless
// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
// `axis`. If `keep_dims` is true, the reduced dimensions are
// retained with length 1.
//
// Arguments:
//	input: The tensor to reduce.
//	axis: The dimensions to reduce. Must be in the range
// `[-rank(input), rank(input))`.
//
// Returns The reduced tensor.
func All(scope *Scope, input tf.Output, axis tf.Output, optional ...AllAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "All",
		Input: []tf.Input{
			input, axis,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
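
// A hypothetical All sketch (not part of the generated API): it reduces a
// 2x2 boolean constant along axis 1, producing [false, true], assuming the
// handwritten NewScope, SubScope and Const helpers.
func exampleAllSketch() (tf.Output, error) {
	s := NewScope()
	x := Const(s.SubScope("x"), [][]bool{{true, false}, {true, true}})
	axis := Const(s.SubScope("axis"), int32(1))
	out := All(s, x, axis, AllKeepDims(false))
	if _, err := s.Finalize(); err != nil {
		return tf.Output{}, err
	}
	return out, nil
}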

// Computes the Eigen Decomposition of a batch of square self-adjoint matrices.
//
// DEPRECATED at GraphDef version 11: Use SelfAdjointEigV2 instead.
//
// The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
// form square matrices, with the same constraints as the single matrix
// SelfAdjointEig.
//
// The result is a [..., M+1, M] matrix with [..., 0,:] containing the
// eigenvalues, and subsequent [...,1:, :] containing the eigenvectors. The eigenvalues
// are sorted in non-decreasing order.
//
// Arguments:
//	input: Shape is `[..., M, M]`.
//
// Returns Shape is `[..., M+1, M]`.
func SelfAdjointEig(scope *Scope, input tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "SelfAdjointEig",
		Input: []tf.Input{
			input,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Computes softplus gradients for a softplus operation.
//
// Arguments:
//	gradients: The backpropagated gradients to the corresponding softplus operation.
//	features: The features passed as input to the corresponding softplus operation.
//
// Returns The gradients: `gradients / (1 + exp(-features))`.
func SoftplusGrad(scope *Scope, gradients tf.Output, features tf.Output) (backprops tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "SoftplusGrad",
		Input: []tf.Input{
			gradients, features,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Solves tridiagonal systems of equations.
//
// `diagonals` is a tensor of shape `[..., 3, M]` whose inner-most 2 dimensions
// represent matrices with three rows being the superdiagonal, the diagonal, and
// the subdiagonal, in order. The last element of the superdiagonal and the first
// element of the subdiagonal are ignored.
// `rhs` is a tensor of shape `[..., M, K]`, representing K right-hand sides for
// each left-hand side.
// The output is a tensor of shape `[..., M, K]` containing the solutions.
//
// Arguments:
//	diagonals: Shape is `[..., 3, M]`.
//	rhs: Shape is `[..., M, K]`.
//
// Returns Shape is `[..., M, K]`.
func TridiagonalSolve(scope *Scope, diagonals tf.Output, rhs tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "TridiagonalSolve",
		Input: []tf.Input{
			diagonals, rhs,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
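
// A hypothetical TridiagonalSolve sketch (not part of the generated API): the
// three rows of `diagonals` hold the superdiagonal, diagonal and subdiagonal
// (with the ignored corner entries set to 0), and `rhs` holds one right-hand
// side. Assumes the handwritten NewScope, SubScope and Const helpers.
func exampleTridiagonalSolveSketch() (tf.Output, error) {
	s := NewScope()
	diagonals := Const(s.SubScope("diags"), [][]float32{
		{1, 1, 0}, // superdiagonal; last element ignored
		{2, 2, 2}, // main diagonal
		{0, 1, 1}, // subdiagonal; first element ignored
	})
	rhs := Const(s.SubScope("rhs"), [][]float32{{1}, {2}, {3}})
	out := TridiagonalSolve(s, diagonals, rhs)
	if _, err := s.Finalize(); err != nil {
		return tf.Output{}, err
	}
	return out, nil
}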

// SelfAdjointEigV2Attr is an optional argument to SelfAdjointEigV2.
type SelfAdjointEigV2Attr func(optionalAttr)

// SelfAdjointEigV2ComputeV sets the optional compute_v attribute to value.
//
// value: If `True` then eigenvectors will be computed and returned in `v`.
// Otherwise, only the eigenvalues will be computed.
// If not specified, defaults to true
func SelfAdjointEigV2ComputeV(value bool) SelfAdjointEigV2Attr {
	return func(m optionalAttr) {
		m["compute_v"] = value
	}
}

// Computes the eigen decomposition of one or more square self-adjoint matrices.
//
// Computes the eigenvalues and (optionally) eigenvectors of each inner matrix in
// `input` such that `input[..., :, :] = v[..., :, :] * diag(e[..., :])`. The eigenvalues
// are sorted in non-decreasing order.
//
// ```python
// # a is a tensor.
// # e is a tensor of eigenvalues.
// # v is a tensor of eigenvectors.
// e, v = self_adjoint_eig(a)
// e = self_adjoint_eig(a, compute_v=False)
// ```
//
// Arguments:
//	input: `Tensor` input of shape `[N, N]`.
//
// Returns Eigenvalues. Shape is `[N]`. Eigenvectors. Shape is `[N, N]`.
func SelfAdjointEigV2(scope *Scope, input tf.Output, optional ...SelfAdjointEigV2Attr) (e tf.Output, v tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "SelfAdjointEigV2",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1)
}

// Adjust the saturation of one or more images.
//
// `images` is a tensor of at least 3 dimensions.  The last dimension is
// interpreted as channels, and must be three.
//
// The input image is considered in the RGB colorspace. Conceptually, the RGB
// colors are first mapped into HSV. A scale is then applied to all the saturation
// values, and the result is then remapped back to the RGB colorspace.
//
// Arguments:
//	images: Images to adjust.  At least 3-D.
//	scale: A float scale to add to the saturation.
//
// Returns The saturation-adjusted image or images.
func AdjustSaturation(scope *Scope, images tf.Output, scale tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "AdjustSaturation",
		Input: []tf.Input{
			images, scale,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// LuAttr is an optional argument to Lu.
type LuAttr func(optionalAttr)

// LuOutputIdxType sets the optional output_idx_type attribute to value.
// If not specified, defaults to DT_INT32
func LuOutputIdxType(value tf.DataType) LuAttr {
	return func(m optionalAttr) {
		m["output_idx_type"] = value
	}
}

// Computes the LU decomposition of one or more square matrices.
//
// The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
// form square matrices.
//
// The input has to be invertible.
//
// The output consists of two tensors LU and P containing the LU decomposition
// of all input submatrices `[..., :, :]`. LU encodes the lower triangular and
// upper triangular factors.
//
// For each input submatrix of shape `[M, M]`, L is a lower triangular matrix of
// shape `[M, M]` with unit diagonal whose entries correspond to the strictly lower
// triangular part of LU. U is an upper triangular matrix of shape `[M, M]` whose
// entries correspond to the upper triangular part, including the diagonal, of LU.
//
// P represents a permutation matrix encoded as a list of indices each between `0`
// and `M-1`, inclusive. If P_mat denotes the permutation matrix corresponding to
// P, then L, U and P satisfy P_mat * input = L * U.
//
// Arguments:
//	input: A tensor of shape `[..., M, M]` whose inner-most 2 dimensions form matrices of
// size `[M, M]`.
//
// Returns A tensor of shape `[..., M, M]` whose strictly lower triangular part denotes the
// lower triangular factor `L` with unit diagonal, and whose upper triangular part
// denotes the upper triangular factor `U`. Permutation of the rows encoded as a list of indices in `0..M-1`. Shape is
// `[..., M]`.
// @compatibility(scipy)
// Similar to `scipy.linalg.lu`, except the triangular factors `L` and `U` are
// packed into a single tensor, the permutation is applied to `input` instead of
// the right hand side and the permutation `P` is returned as a list of indices
// instead of a permutation matrix.
// @end_compatibility
func Lu(scope *Scope, input tf.Output, optional ...LuAttr) (lu tf.Output, p tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "Lu",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1)
}
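
// A hypothetical Lu sketch (not part of the generated API): it factors a 2x2
// constant, requesting int32 permutation indices via the optional attribute.
// Assumes the handwritten NewScope and Const helpers from this package.
func exampleLuSketch() (lu tf.Output, p tf.Output, err error) {
	s := NewScope()
	a := Const(s, [][]float32{{4, 3}, {6, 3}})
	lu, p = Lu(s, a, LuOutputIdxType(tf.Int32))
	if _, err = s.Finalize(); err != nil {
		return tf.Output{}, tf.Output{}, err
	}
	return lu, p, nil
}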

// Deprecated. Use TensorArrayCloseV3
//
// DEPRECATED at GraphDef version 26: Use TensorArrayCloseV3
//
// Returns the created operation.
func TensorArrayCloseV2(scope *Scope, handle tf.Output) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "TensorArrayCloseV2",
		Input: []tf.Input{
			handle,
		},
	}
	return scope.AddOperation(opspec)
}

// EncodeBase64Attr is an optional argument to EncodeBase64.
type EncodeBase64Attr func(optionalAttr)

// EncodeBase64Pad sets the optional pad attribute to value.
//
// value: Bool whether padding is applied at the ends.
// If not specified, defaults to false
func EncodeBase64Pad(value bool) EncodeBase64Attr {
	return func(m optionalAttr) {
		m["pad"] = value
	}
}

// Encode strings into web-safe base64 format.
//
// Refer to the following article for more information on base64 format:
35706// en.wikipedia.org/wiki/Base64. Base64 strings may have padding with '=' at the
// end so that the encoded string's length is a multiple of 4. See the Padding
// section of the link above.
35709//
35710// Web-safe means that the encoder uses - and _ instead of + and /.
35711//
35712// Arguments:
35713//	input: Strings to be encoded.
35714//
35715// Returns Input strings encoded in base64.
35716func EncodeBase64(scope *Scope, input tf.Output, optional ...EncodeBase64Attr) (output tf.Output) {
35717	if scope.Err() != nil {
35718		return
35719	}
35720	attrs := map[string]interface{}{}
35721	for _, a := range optional {
35722		a(attrs)
35723	}
35724	opspec := tf.OpSpec{
35725		Type: "EncodeBase64",
35726		Input: []tf.Input{
35727			input,
35728		},
35729		Attrs: attrs,
35730	}
35731	op := scope.AddOperation(opspec)
35732	return op.Output(0)
35733}
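
// Editor-added sketch, not generated code (the helper name is hypothetical):
// enable '=' padding so each encoded string's length is a multiple of 4.
func exampleEncodeBase64(s *Scope, in tf.Output) tf.Output {
	return EncodeBase64(s, in, EncodeBase64Pad(true))
}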
35734
35735// A dataset that creates window datasets from the input dataset.
35736//
35737// Arguments:
35738//
35739//	size: A scalar representing the number of elements to accumulate in a window.
35740//	shift: A scalar representing the steps moving the sliding window forward in one
35741// iteration. It must be positive.
35742//	stride: A scalar representing the stride of the input elements of the sliding window.
35743// It must be positive.
35744//	drop_remainder: A scalar representing whether a window should be dropped in case its size is
35745// smaller than desired.
35746//
35747//
35748func WindowDataset(scope *Scope, input_dataset tf.Output, size tf.Output, shift tf.Output, stride tf.Output, drop_remainder tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
35749	if scope.Err() != nil {
35750		return
35751	}
35752	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
35753	opspec := tf.OpSpec{
35754		Type: "WindowDataset",
35755		Input: []tf.Input{
35756			input_dataset, size, shift, stride, drop_remainder,
35757		},
35758		Attrs: attrs,
35759	}
35760	op := scope.AddOperation(opspec)
35761	return op.Output(0)
35762}
35763
35764// Computes the matrix square root of one or more square matrices:
35765//
35766// matmul(sqrtm(A), sqrtm(A)) = A
35767//
35768// The input matrix should be invertible. If the input matrix is real, it should
35769// have no eigenvalues which are real and negative (pairs of complex conjugate
35770// eigenvalues are allowed).
35771//
35772// The matrix square root is computed by first reducing the matrix to
35773// quasi-triangular form with the real Schur decomposition. The square root
35774// of the quasi-triangular matrix is then computed directly. Details of
35775// the algorithm can be found in: Nicholas J. Higham, "Computing real
35776// square roots of a real matrix", Linear Algebra Appl., 1987.
35777//
35778// The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
35779// form square matrices. The output is a tensor of the same shape as the input
35780// containing the matrix square root for all input submatrices `[..., :, :]`.
35781//
35782// Arguments:
35783//	input: Shape is `[..., M, M]`.
35784//
35785// Returns Shape is `[..., M, M]`.
35786//
35787// @compatibility(scipy)
35788// Equivalent to scipy.linalg.sqrtm
35789// @end_compatibility
35790func MatrixSquareRoot(scope *Scope, input tf.Output) (output tf.Output) {
35791	if scope.Err() != nil {
35792		return
35793	}
35794	opspec := tf.OpSpec{
35795		Type: "MatrixSquareRoot",
35796		Input: []tf.Input{
35797			input,
35798		},
35799	}
35800	op := scope.AddOperation(opspec)
35801	return op.Output(0)
35802}
35803
35804// SvdAttr is an optional argument to Svd.
35805type SvdAttr func(optionalAttr)
35806
35807// SvdComputeUv sets the optional compute_uv attribute to value.
35808//
35809// value: If true, left and right singular vectors will be
35810// computed and returned in `u` and `v`, respectively.
// If false, `u` and `v` are not set and should never be referenced.
35812// If not specified, defaults to true
35813func SvdComputeUv(value bool) SvdAttr {
35814	return func(m optionalAttr) {
35815		m["compute_uv"] = value
35816	}
35817}
35818
35819// SvdFullMatrices sets the optional full_matrices attribute to value.
35820//
35821// value: If true, compute full-sized `u` and `v`. If false
35822// (the default), compute only the leading `P` singular vectors.
35823// Ignored if `compute_uv` is `False`.
35824// If not specified, defaults to false
35825func SvdFullMatrices(value bool) SvdAttr {
35826	return func(m optionalAttr) {
35827		m["full_matrices"] = value
35828	}
35829}
35830
35831// Computes the singular value decompositions of one or more matrices.
35832//
35833// Computes the SVD of each inner matrix in `input` such that
35834// `input[..., :, :] = u[..., :, :] * diag(s[..., :, :]) * transpose(v[..., :, :])`
35835//
35836// ```python
35837// # a is a tensor containing a batch of matrices.
35838// # s is a tensor of singular values for each matrix.
// # u is the tensor containing the left singular vectors for each matrix.
// # v is the tensor containing the right singular vectors for each matrix.
35841// s, u, v = svd(a)
35842// s, _, _ = svd(a, compute_uv=False)
35843// ```
35844//
35845// Arguments:
35846//	input: A tensor of shape `[..., M, N]` whose inner-most 2 dimensions
35847// form matrices of size `[M, N]`. Let `P` be the minimum of `M` and `N`.
35848//
// Returns:
//	s: Singular values. Shape is `[..., P]`.
//	u: Left singular vectors. If `full_matrices` is `False` then shape is
// `[..., M, P]`; if `full_matrices` is `True` then shape is
// `[..., M, M]`. Undefined if `compute_uv` is `False`.
//	v: Right singular vectors. If `full_matrices` is `False` then shape is
// `[..., N, P]`. If `full_matrices` is `True` then shape is `[..., N, N]`.
// Undefined if `compute_uv` is false.
35854func Svd(scope *Scope, input tf.Output, optional ...SvdAttr) (s tf.Output, u tf.Output, v tf.Output) {
35855	if scope.Err() != nil {
35856		return
35857	}
35858	attrs := map[string]interface{}{}
35859	for _, a := range optional {
35860		a(attrs)
35861	}
35862	opspec := tf.OpSpec{
35863		Type: "Svd",
35864		Input: []tf.Input{
35865			input,
35866		},
35867		Attrs: attrs,
35868	}
35869	op := scope.AddOperation(opspec)
35870	return op.Output(0), op.Output(1), op.Output(2)
35871}
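
// Editor-added sketch, not generated code (the helper name is hypothetical):
// when only the singular values are needed, skipping u and v avoids
// unnecessary work.
func exampleSvdValuesOnly(s *Scope, a tf.Output) tf.Output {
	// u and v are unset and must not be referenced when compute_uv is false.
	sv, _, _ := Svd(s, a, SvdComputeUv(false))
	return sv
}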
35872
35873// Converts one or more images from RGB to HSV.
35874//
35875// Outputs a tensor of the same shape as the `images` tensor, containing the HSV
// value of the pixels. The output is only well defined if the values in `images`
35877// are in `[0,1]`.
35878//
35879// `output[..., 0]` contains hue, `output[..., 1]` contains saturation, and
35880// `output[..., 2]` contains value. All HSV values are in `[0,1]`. A hue of 0
35881// corresponds to pure red, hue 1/3 is pure green, and 2/3 is pure blue.
35882//
35883// Arguments:
35884//	images: 1-D or higher rank. RGB data to convert. Last dimension must be size 3.
35885//
35886// Returns `images` converted to HSV.
35887func RGBToHSV(scope *Scope, images tf.Output) (output tf.Output) {
35888	if scope.Err() != nil {
35889		return
35890	}
35891	opspec := tf.OpSpec{
35892		Type: "RGBToHSV",
35893		Input: []tf.Input{
35894			images,
35895		},
35896	}
35897	op := scope.AddOperation(opspec)
35898	return op.Output(0)
35899}
35900
35901// Does nothing. Only useful as a placeholder for control edges.
35902//
35903// Returns the created operation.
35904func NoOp(scope *Scope) (o *tf.Operation) {
35905	if scope.Err() != nil {
35906		return
35907	}
35908	opspec := tf.OpSpec{
35909		Type: "NoOp",
35910	}
35911	return scope.AddOperation(opspec)
35912}
35913
35914// MergeV2CheckpointsAttr is an optional argument to MergeV2Checkpoints.
35915type MergeV2CheckpointsAttr func(optionalAttr)
35916
35917// MergeV2CheckpointsDeleteOldDirs sets the optional delete_old_dirs attribute to value.
35918//
// value: if true, attempt to recursively delete the dirname of each path in
// the input checkpoint_prefixes after merging (see the op comment below).
35920// If not specified, defaults to true
35921func MergeV2CheckpointsDeleteOldDirs(value bool) MergeV2CheckpointsAttr {
35922	return func(m optionalAttr) {
35923		m["delete_old_dirs"] = value
35924	}
35925}
35926
// Merges the metadata files of sharded checkpoints (V2 format specific).
//
// The result is one logical checkpoint, with one physical metadata file and
// renamed data files.
35931//
35932// Intended for "grouping" multiple checkpoints in a sharded checkpoint setup.
35933//
35934// If delete_old_dirs is true, attempts to delete recursively the dirname of each
// path in the input checkpoint_prefixes.  This is useful when those paths are
// non-user-facing temporary locations.
35937//
35938// Arguments:
35939//	checkpoint_prefixes: prefixes of V2 checkpoints to merge.
35940//	destination_prefix: scalar.  The desired final prefix.  Allowed to be the same
35941// as one of the checkpoint_prefixes.
35942//
35943// Returns the created operation.
35944func MergeV2Checkpoints(scope *Scope, checkpoint_prefixes tf.Output, destination_prefix tf.Output, optional ...MergeV2CheckpointsAttr) (o *tf.Operation) {
35945	if scope.Err() != nil {
35946		return
35947	}
35948	attrs := map[string]interface{}{}
35949	for _, a := range optional {
35950		a(attrs)
35951	}
35952	opspec := tf.OpSpec{
35953		Type: "MergeV2Checkpoints",
35954		Input: []tf.Input{
35955			checkpoint_prefixes, destination_prefix,
35956		},
35957		Attrs: attrs,
35958	}
35959	return scope.AddOperation(opspec)
35960}
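
// Editor-added sketch, not generated code (the helper name is hypothetical):
// merge shard metadata while keeping the temporary shard directories on disk.
func exampleMergeV2Checkpoints(s *Scope, prefixes, dest tf.Output) *tf.Operation {
	return MergeV2Checkpoints(s, prefixes, dest,
		MergeV2CheckpointsDeleteOldDirs(false))
}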
35961
35962// Saves input tensors slices to disk.
35963//
35964// This is like `Save` except that tensors can be listed in the saved file as being
35965// a slice of a larger tensor.  `shapes_and_slices` specifies the shape of the
35966// larger tensor and the slice that this tensor covers. `shapes_and_slices` must
35967// have as many elements as `tensor_names`.
35968//
35969// Elements of the `shapes_and_slices` input must either be:
35970//
35971// *  The empty string, in which case the corresponding tensor is
35972//    saved normally.
35973// *  A string of the form `dim0 dim1 ... dimN-1 slice-spec` where the
35974//    `dimI` are the dimensions of the larger tensor and `slice-spec`
35975//    specifies what part is covered by the tensor to save.
35976//
35977// `slice-spec` itself is a `:`-separated list: `slice0:slice1:...:sliceN-1`
35978// where each `sliceI` is either:
35979//
35980// *  The string `-` meaning that the slice covers all indices of this dimension
35981// *  `start,length` where `start` and `length` are integers.  In that
35982//    case the slice covers `length` indices starting at `start`.
35983//
35984// See also `Save`.
35985//
35986// Arguments:
35987//	filename: Must have a single element. The name of the file to which we write the
35988// tensor.
35989//	tensor_names: Shape `[N]`. The names of the tensors to be saved.
35990//	shapes_and_slices: Shape `[N]`.  The shapes and slice specifications to use when
35991// saving the tensors.
35992//	data: `N` tensors to save.
35993//
35994// Returns the created operation.
35995func SaveSlices(scope *Scope, filename tf.Output, tensor_names tf.Output, shapes_and_slices tf.Output, data []tf.Output) (o *tf.Operation) {
35996	if scope.Err() != nil {
35997		return
35998	}
35999	opspec := tf.OpSpec{
36000		Type: "SaveSlices",
36001		Input: []tf.Input{
36002			filename, tensor_names, shapes_and_slices, tf.OutputList(data),
36003		},
36004	}
36005	return scope.AddOperation(opspec)
36006}
36007
36008// DenseToDenseSetOperationAttr is an optional argument to DenseToDenseSetOperation.
36009type DenseToDenseSetOperationAttr func(optionalAttr)
36010
36011// DenseToDenseSetOperationValidateIndices sets the optional validate_indices attribute to value.
36012// If not specified, defaults to true
36013func DenseToDenseSetOperationValidateIndices(value bool) DenseToDenseSetOperationAttr {
36014	return func(m optionalAttr) {
36015		m["validate_indices"] = value
36016	}
36017}
36018
36019// Applies set operation along last dimension of 2 `Tensor` inputs.
36020//
36021// See SetOperationOp::SetOperationFromContext for values of `set_operation`.
36022//
36023// Output `result` is a `SparseTensor` represented by `result_indices`,
36024// `result_values`, and `result_shape`. For `set1` and `set2` ranked `n`, this
36025// has rank `n` and the same 1st `n-1` dimensions as `set1` and `set2`. The `nth`
36026// dimension contains the result of `set_operation` applied to the corresponding
36027// `[0...n-1]` dimension of `set`.
36028//
36029// Arguments:
//	set1: `Tensor` with rank `n`. 1st `n-1` dimensions must be the same as `set2`.
// Dimension `n` contains values in a set; duplicates are allowed but ignored.
//	set2: `Tensor` with rank `n`. 1st `n-1` dimensions must be the same as `set1`.
// Dimension `n` contains values in a set; duplicates are allowed but ignored.
36034//
36035//
// Returns:
//	result_indices: 2D indices of a `SparseTensor`.
//	result_values: 1D values of a `SparseTensor`.
//	result_shape: 1D `Tensor` shape of a `SparseTensor`. `result_shape[0...n-1]` is
// the same as the 1st `n-1` dimensions of `set1` and `set2`, `result_shape[n]`
// is the max result set size across all `0...n-1` dimensions.
36039func DenseToDenseSetOperation(scope *Scope, set1 tf.Output, set2 tf.Output, set_operation string, optional ...DenseToDenseSetOperationAttr) (result_indices tf.Output, result_values tf.Output, result_shape tf.Output) {
36040	if scope.Err() != nil {
36041		return
36042	}
36043	attrs := map[string]interface{}{"set_operation": set_operation}
36044	for _, a := range optional {
36045		a(attrs)
36046	}
36047	opspec := tf.OpSpec{
36048		Type: "DenseToDenseSetOperation",
36049		Input: []tf.Input{
36050			set1, set2,
36051		},
36052		Attrs: attrs,
36053	}
36054	op := scope.AddOperation(opspec)
36055	return op.Output(0), op.Output(1), op.Output(2)
36056}
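
// Editor-added sketch, not generated code (the helper name is hypothetical).
// The set_operation strings accepted by the kernel are assumed to include
// "intersection", "union", "a-b" and "b-a".
func exampleSetIntersection(s *Scope, set1, set2 tf.Output) (indices, values, shape tf.Output) {
	return DenseToDenseSetOperation(s, set1, set2, "intersection")
}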
36057
36058// Generate a sharded filename. The filename is printf formatted as
36059//
36060//    %s-%05d-of-%05d, basename, shard, num_shards.
36061func ShardedFilename(scope *Scope, basename tf.Output, shard tf.Output, num_shards tf.Output) (filename tf.Output) {
36062	if scope.Err() != nil {
36063		return
36064	}
36065	opspec := tf.OpSpec{
36066		Type: "ShardedFilename",
36067		Input: []tf.Input{
36068			basename, shard, num_shards,
36069		},
36070	}
36071	op := scope.AddOperation(opspec)
36072	return op.Output(0)
36073}
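
// Editor-added sketch, not generated code (the helper name is hypothetical,
// and Const is assumed to be this package's constant-tensor helper): with
// these inputs the op yields "ckpt-00003-of-00010" at run time.
func exampleShardedFilename(s *Scope) tf.Output {
	basename := Const(s, "ckpt")
	shard := Const(s, int32(3))
	numShards := Const(s, int32(10))
	return ShardedFilename(s, basename, shard, numShards)
}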
36074
36075// Generate a glob pattern matching all sharded file names.
36076func ShardedFilespec(scope *Scope, basename tf.Output, num_shards tf.Output) (filename tf.Output) {
36077	if scope.Err() != nil {
36078		return
36079	}
36080	opspec := tf.OpSpec{
36081		Type: "ShardedFilespec",
36082		Input: []tf.Input{
36083			basename, num_shards,
36084		},
36085	}
36086	op := scope.AddOperation(opspec)
36087	return op.Output(0)
36088}
36089
36090// TextLineReaderV2Attr is an optional argument to TextLineReaderV2.
36091type TextLineReaderV2Attr func(optionalAttr)
36092
36093// TextLineReaderV2SkipHeaderLines sets the optional skip_header_lines attribute to value.
36094//
36095// value: Number of lines to skip from the beginning of every file.
36096// If not specified, defaults to 0
36097func TextLineReaderV2SkipHeaderLines(value int64) TextLineReaderV2Attr {
36098	return func(m optionalAttr) {
36099		m["skip_header_lines"] = value
36100	}
36101}
36102
36103// TextLineReaderV2Container sets the optional container attribute to value.
36104//
36105// value: If non-empty, this reader is placed in the given container.
36106// Otherwise, a default container is used.
36107// If not specified, defaults to ""
36108func TextLineReaderV2Container(value string) TextLineReaderV2Attr {
36109	return func(m optionalAttr) {
36110		m["container"] = value
36111	}
36112}
36113
36114// TextLineReaderV2SharedName sets the optional shared_name attribute to value.
36115//
36116// value: If non-empty, this reader is named in the given bucket
36117// with this shared_name. Otherwise, the node name is used instead.
36118// If not specified, defaults to ""
36119func TextLineReaderV2SharedName(value string) TextLineReaderV2Attr {
36120	return func(m optionalAttr) {
36121		m["shared_name"] = value
36122	}
36123}
36124
36125// A Reader that outputs the lines of a file delimited by '\n'.
36126//
36127// Returns The handle to reference the Reader.
36128func TextLineReaderV2(scope *Scope, optional ...TextLineReaderV2Attr) (reader_handle tf.Output) {
36129	if scope.Err() != nil {
36130		return
36131	}
36132	attrs := map[string]interface{}{}
36133	for _, a := range optional {
36134		a(attrs)
36135	}
36136	opspec := tf.OpSpec{
36137		Type: "TextLineReaderV2",
36138
36139		Attrs: attrs,
36140	}
36141	op := scope.AddOperation(opspec)
36142	return op.Output(0)
36143}
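
// Editor-added sketch, not generated code (the helper name and shared_name
// value are hypothetical): a reader that skips one header line per file and
// is shared across sessions under a fixed name.
func exampleTextLineReaderV2(s *Scope) tf.Output {
	return TextLineReaderV2(s,
		TextLineReaderV2SkipHeaderLines(1),
		TextLineReaderV2SharedName("csv_reader"))
}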
36144
36145// LoadAndRemapMatrixAttr is an optional argument to LoadAndRemapMatrix.
36146type LoadAndRemapMatrixAttr func(optionalAttr)
36147
36148// LoadAndRemapMatrixMaxRowsInMemory sets the optional max_rows_in_memory attribute to value.
36149//
36150// value: The maximum number of rows to load from the checkpoint at
36151// once. If less than or equal to 0, the entire matrix will be loaded into
36152// memory. Setting this arg trades increased disk reads for lower memory usage.
36153// If not specified, defaults to -1
36154func LoadAndRemapMatrixMaxRowsInMemory(value int64) LoadAndRemapMatrixAttr {
36155	return func(m optionalAttr) {
36156		m["max_rows_in_memory"] = value
36157	}
36158}
36159
// Loads a 2-D (matrix) `Tensor` with name `old_tensor_name` from the checkpoint
// at `ckpt_path`, potentially reordering its rows and columns using the
// specified remappings.
36164//
36165// Most users should use one of the wrapper initializers (such as
36166// `tf.contrib.framework.load_and_remap_matrix_initializer`) instead of this
36167// function directly.
36168//
36169// The remappings are 1-D tensors with the following properties:
36170//
36171// * `row_remapping` must have exactly `num_rows` entries. Row `i` of the output
36172//   matrix will be initialized from the row corresponding to index
36173//   `row_remapping[i]` in the old `Tensor` from the checkpoint.
36174// * `col_remapping` must have either 0 entries (indicating that no column
36175//   reordering is needed) or `num_cols` entries. If specified, column `j` of the
36176//   output matrix will be initialized from the column corresponding to index
36177//   `col_remapping[j]` in the old `Tensor` from the checkpoint.
36178// * A value of -1 in either of the remappings signifies a "missing" entry. In that
36179//   case, values from the `initializing_values` tensor will be used to fill that
36180//   missing row or column. If `row_remapping` has `r` missing entries and
36181//   `col_remapping` has `c` missing entries, then the following condition must be
36182//   true:
36183//
36184// `(r * num_cols) + (c * num_rows) - (r * c) == len(initializing_values)`
36185//
36186// The remapping tensors can be generated using the GenerateVocabRemapping op.
36187//
36188// As an example, with row_remapping = [1, 0, -1], col_remapping = [0, 2, -1],
36189// initializing_values = [0.5, -0.5, 0.25, -0.25, 42], and w(i, j) representing
36190// the value from row i, column j of the old tensor in the checkpoint, the output
36191// matrix will look like the following:
36192//
36193// [[w(1, 0),  w(1, 2),  0.5],
36194//  [w(0, 0),  w(0, 2), -0.5],
36195//  [0.25,    -0.25,      42]]
36196//
36197// Arguments:
36198//	ckpt_path: Path to the TensorFlow checkpoint (version 2, `TensorBundle`) from
36199// which the old matrix `Tensor` will be loaded.
36200//	old_tensor_name: Name of the 2-D `Tensor` to load from checkpoint.
36201//	row_remapping: An int `Tensor` of row remappings (generally created by
36202// `generate_vocab_remapping`).  Even if no row remapping is needed, this must
36203// still be an index-valued Tensor (e.g. [0, 1, 2, ...]), or a shifted
36204// index-valued `Tensor` (e.g. [8, 9, 10, ...], for partitioned `Variables`).
36205//	col_remapping: An int `Tensor` of column remappings (generally created by
36206// `generate_vocab_remapping`).  May be a size-0 `Tensor` if only row remapping
36207// is to be done (e.g. column ordering is the same).
36208//	initializing_values: A float `Tensor` containing  values to fill in for cells
36209// in the output matrix that are not loaded from the checkpoint. Length must be
36210// exactly the same as the number of missing / new cells.
36211//	num_rows: Number of rows (length of the 1st dimension) in the output matrix.
36212//	num_cols: Number of columns (length of the 2nd dimension) in the output matrix.
36213//
36214// Returns Output matrix containing existing values loaded from the
36215// checkpoint, and with any missing values filled in from initializing_values.
36216func LoadAndRemapMatrix(scope *Scope, ckpt_path tf.Output, old_tensor_name tf.Output, row_remapping tf.Output, col_remapping tf.Output, initializing_values tf.Output, num_rows int64, num_cols int64, optional ...LoadAndRemapMatrixAttr) (output_matrix tf.Output) {
36217	if scope.Err() != nil {
36218		return
36219	}
36220	attrs := map[string]interface{}{"num_rows": num_rows, "num_cols": num_cols}
36221	for _, a := range optional {
36222		a(attrs)
36223	}
36224	opspec := tf.OpSpec{
36225		Type: "LoadAndRemapMatrix",
36226		Input: []tf.Input{
36227			ckpt_path, old_tensor_name, row_remapping, col_remapping, initializing_values,
36228		},
36229		Attrs: attrs,
36230	}
36231	op := scope.AddOperation(opspec)
36232	return op.Output(0)
36233}
36234
36235// TFRecordReaderV2Attr is an optional argument to TFRecordReaderV2.
36236type TFRecordReaderV2Attr func(optionalAttr)
36237
36238// TFRecordReaderV2Container sets the optional container attribute to value.
36239//
36240// value: If non-empty, this reader is placed in the given container.
36241// Otherwise, a default container is used.
36242// If not specified, defaults to ""
36243func TFRecordReaderV2Container(value string) TFRecordReaderV2Attr {
36244	return func(m optionalAttr) {
36245		m["container"] = value
36246	}
36247}
36248
36249// TFRecordReaderV2SharedName sets the optional shared_name attribute to value.
36250//
36251// value: If non-empty, this reader is named in the given bucket
36252// with this shared_name. Otherwise, the node name is used instead.
36253// If not specified, defaults to ""
36254func TFRecordReaderV2SharedName(value string) TFRecordReaderV2Attr {
36255	return func(m optionalAttr) {
36256		m["shared_name"] = value
36257	}
36258}
36259
36260// TFRecordReaderV2CompressionType sets the optional compression_type attribute to value.
36261// If not specified, defaults to ""
36262func TFRecordReaderV2CompressionType(value string) TFRecordReaderV2Attr {
36263	return func(m optionalAttr) {
36264		m["compression_type"] = value
36265	}
36266}
36267
36268// A Reader that outputs the records from a TensorFlow Records file.
36269//
36270// Returns The handle to reference the Reader.
36271func TFRecordReaderV2(scope *Scope, optional ...TFRecordReaderV2Attr) (reader_handle tf.Output) {
36272	if scope.Err() != nil {
36273		return
36274	}
36275	attrs := map[string]interface{}{}
36276	for _, a := range optional {
36277		a(attrs)
36278	}
36279	opspec := tf.OpSpec{
36280		Type: "TFRecordReaderV2",
36281
36282		Attrs: attrs,
36283	}
36284	op := scope.AddOperation(opspec)
36285	return op.Output(0)
36286}
36287
36288// QuantizeAndDequantizeV3Attr is an optional argument to QuantizeAndDequantizeV3.
36289type QuantizeAndDequantizeV3Attr func(optionalAttr)
36290
36291// QuantizeAndDequantizeV3SignedInput sets the optional signed_input attribute to value.
36292// If not specified, defaults to true
36293func QuantizeAndDequantizeV3SignedInput(value bool) QuantizeAndDequantizeV3Attr {
36294	return func(m optionalAttr) {
36295		m["signed_input"] = value
36296	}
36297}
36298
36299// QuantizeAndDequantizeV3RangeGiven sets the optional range_given attribute to value.
36300// If not specified, defaults to true
36301func QuantizeAndDequantizeV3RangeGiven(value bool) QuantizeAndDequantizeV3Attr {
36302	return func(m optionalAttr) {
36303		m["range_given"] = value
36304	}
36305}
36306
36307// Quantizes then dequantizes a tensor.
36308//
36309// This is almost identical to QuantizeAndDequantizeV2, except that num_bits is a
36310// tensor, so its value can change during training.
36311func QuantizeAndDequantizeV3(scope *Scope, input tf.Output, input_min tf.Output, input_max tf.Output, num_bits tf.Output, optional ...QuantizeAndDequantizeV3Attr) (output tf.Output) {
36312	if scope.Err() != nil {
36313		return
36314	}
36315	attrs := map[string]interface{}{}
36316	for _, a := range optional {
36317		a(attrs)
36318	}
36319	opspec := tf.OpSpec{
36320		Type: "QuantizeAndDequantizeV3",
36321		Input: []tf.Input{
36322			input, input_min, input_max, num_bits,
36323		},
36324		Attrs: attrs,
36325	}
36326	op := scope.AddOperation(opspec)
36327	return op.Output(0)
36328}
36329
36330// IdentityReaderV2Attr is an optional argument to IdentityReaderV2.
36331type IdentityReaderV2Attr func(optionalAttr)
36332
36333// IdentityReaderV2Container sets the optional container attribute to value.
36334//
36335// value: If non-empty, this reader is placed in the given container.
36336// Otherwise, a default container is used.
36337// If not specified, defaults to ""
36338func IdentityReaderV2Container(value string) IdentityReaderV2Attr {
36339	return func(m optionalAttr) {
36340		m["container"] = value
36341	}
36342}
36343
36344// IdentityReaderV2SharedName sets the optional shared_name attribute to value.
36345//
36346// value: If non-empty, this reader is named in the given bucket
36347// with this shared_name. Otherwise, the node name is used instead.
36348// If not specified, defaults to ""
36349func IdentityReaderV2SharedName(value string) IdentityReaderV2Attr {
36350	return func(m optionalAttr) {
36351		m["shared_name"] = value
36352	}
36353}
36354
36355// A Reader that outputs the queued work as both the key and value.
36356//
36357// To use, enqueue strings in a Queue.  ReaderRead will take the front
36358// work string and output (work, work).
36359//
36360// Returns The handle to reference the Reader.
36361func IdentityReaderV2(scope *Scope, optional ...IdentityReaderV2Attr) (reader_handle tf.Output) {
36362	if scope.Err() != nil {
36363		return
36364	}
36365	attrs := map[string]interface{}{}
36366	for _, a := range optional {
36367		a(attrs)
36368	}
36369	opspec := tf.OpSpec{
36370		Type: "IdentityReaderV2",
36371
36372		Attrs: attrs,
36373	}
36374	op := scope.AddOperation(opspec)
36375	return op.Output(0)
36376}
36377
36378// ResourceApplyGradientDescentAttr is an optional argument to ResourceApplyGradientDescent.
36379type ResourceApplyGradientDescentAttr func(optionalAttr)
36380
36381// ResourceApplyGradientDescentUseLocking sets the optional use_locking attribute to value.
36382//
36383// value: If `True`, the subtraction will be protected by a lock;
36384// otherwise the behavior is undefined, but may exhibit less contention.
36385// If not specified, defaults to false
36386func ResourceApplyGradientDescentUseLocking(value bool) ResourceApplyGradientDescentAttr {
36387	return func(m optionalAttr) {
36388		m["use_locking"] = value
36389	}
36390}
36391
36392// Update '*var' by subtracting 'alpha' * 'delta' from it.
36393//
36394// Arguments:
36395//	var_: Should be from a Variable().
36396//	alpha: Scaling factor. Must be a scalar.
36397//	delta: The change.
36398//
36399// Returns the created operation.
36400func ResourceApplyGradientDescent(scope *Scope, var_ tf.Output, alpha tf.Output, delta tf.Output, optional ...ResourceApplyGradientDescentAttr) (o *tf.Operation) {
36401	if scope.Err() != nil {
36402		return
36403	}
36404	attrs := map[string]interface{}{}
36405	for _, a := range optional {
36406		a(attrs)
36407	}
36408	opspec := tf.OpSpec{
36409		Type: "ResourceApplyGradientDescent",
36410		Input: []tf.Input{
36411			var_, alpha, delta,
36412		},
36413		Attrs: attrs,
36414	}
36415	return scope.AddOperation(opspec)
36416}
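
// Editor-added sketch, not generated code (the helper name and learning rate
// are hypothetical; Const is assumed to be this package's constant-tensor
// helper): one locked SGD step, var -= 0.01 * grad.
func exampleSGDStep(s *Scope, v, grad tf.Output) *tf.Operation {
	lr := Const(s, float32(0.01))
	return ResourceApplyGradientDescent(s, v, lr, grad,
		ResourceApplyGradientDescentUseLocking(true))
}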
36417
36418// Returns the next record (key, value pair) produced by a Reader.
36419//
36420// Will dequeue from the input queue if necessary (e.g. when the
36421// Reader needs to start reading from a new file since it has finished
36422// with the previous file).
36423//
36424// Arguments:
36425//	reader_handle: Handle to a Reader.
36426//	queue_handle: Handle to a Queue, with string work items.
36427//
// Returns:
//	key: A scalar.
//	value: A scalar.
36429func ReaderReadV2(scope *Scope, reader_handle tf.Output, queue_handle tf.Output) (key tf.Output, value tf.Output) {
36430	if scope.Err() != nil {
36431		return
36432	}
36433	opspec := tf.OpSpec{
36434		Type: "ReaderReadV2",
36435		Input: []tf.Input{
36436			reader_handle, queue_handle,
36437		},
36438	}
36439	op := scope.AddOperation(opspec)
36440	return op.Output(0), op.Output(1)
36441}
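
// Editor-added sketch, not generated code (the helper name is hypothetical):
// a text reader wired to a queue of filenames, returning one (key, value)
// line per read.
func exampleReadLine(s *Scope, filenameQueue tf.Output) (key, value tf.Output) {
	reader := TextLineReaderV2(s)
	return ReaderReadV2(s, reader, filenameQueue)
}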
36442
36443// Returns up to `num_records` (key, value) pairs produced by a Reader.
36444//
36445// Will dequeue from the input queue if necessary (e.g. when the
36446// Reader needs to start reading from a new file since it has finished
36447// with the previous file).
// It may return fewer than `num_records` even before the last batch.
36449//
36450// Arguments:
36451//	reader_handle: Handle to a `Reader`.
36452//	queue_handle: Handle to a `Queue`, with string work items.
36453//	num_records: number of records to read from `Reader`.
36454//
// Returns:
//	keys: A 1-D tensor.
//	values: A 1-D tensor.
36456func ReaderReadUpToV2(scope *Scope, reader_handle tf.Output, queue_handle tf.Output, num_records tf.Output) (keys tf.Output, values tf.Output) {
36457	if scope.Err() != nil {
36458		return
36459	}
36460	opspec := tf.OpSpec{
36461		Type: "ReaderReadUpToV2",
36462		Input: []tf.Input{
36463			reader_handle, queue_handle, num_records,
36464		},
36465	}
36466	op := scope.AddOperation(opspec)
36467	return op.Output(0), op.Output(1)
36468}
36469
// Adds v into specified rows of x.
//
//     Computes y = x; y[i, :] += v; return y.
36473//
36474// Arguments:
36475//	x: A `Tensor` of type T.
36476//	i: A vector. Indices into the left-most dimension of `x`.
36477//	v: A `Tensor` of type T. Same dimension sizes as x except the first dimension, which must be the same as i's size.
36478//
36479// Returns A `Tensor` of type T. An alias of `x`. The content of `y` is undefined if there are duplicates in `i`.
36480func InplaceAdd(scope *Scope, x tf.Output, i tf.Output, v tf.Output) (y tf.Output) {
36481	if scope.Err() != nil {
36482		return
36483	}
36484	opspec := tf.OpSpec{
36485		Type: "InplaceAdd",
36486		Input: []tf.Input{
36487			x, i, v,
36488		},
36489	}
36490	op := scope.AddOperation(opspec)
36491	return op.Output(0)
36492}
36493
36494// Restore a Reader to its initial clean state.
36495//
36496// Arguments:
36497//	reader_handle: Handle to a Reader.
36498//
36499// Returns the created operation.
36500func ReaderResetV2(scope *Scope, reader_handle tf.Output) (o *tf.Operation) {
36501	if scope.Err() != nil {
36502		return
36503	}
36504	opspec := tf.OpSpec{
36505		Type: "ReaderResetV2",
36506		Input: []tf.Input{
36507			reader_handle,
36508		},
36509	}
36510	return scope.AddOperation(opspec)
36511}
36512
36513// BatchAttr is an optional argument to Batch.
36514type BatchAttr func(optionalAttr)
36515
36516// BatchMaxEnqueuedBatches sets the optional max_enqueued_batches attribute to value.
36517// If not specified, defaults to 10
36518func BatchMaxEnqueuedBatches(value int64) BatchAttr {
36519	return func(m optionalAttr) {
36520		m["max_enqueued_batches"] = value
36521	}
36522}
36523
36524// BatchAllowedBatchSizes sets the optional allowed_batch_sizes attribute to value.
// If not specified, defaults to []
36526func BatchAllowedBatchSizes(value []int64) BatchAttr {
36527	return func(m optionalAttr) {
36528		m["allowed_batch_sizes"] = value
36529	}
36530}
36531
36532// BatchContainer sets the optional container attribute to value.
36533// If not specified, defaults to ""
36534func BatchContainer(value string) BatchAttr {
36535	return func(m optionalAttr) {
36536		m["container"] = value
36537	}
36538}
36539
36540// BatchSharedName sets the optional shared_name attribute to value.
36541// If not specified, defaults to ""
36542func BatchSharedName(value string) BatchAttr {
36543	return func(m optionalAttr) {
36544		m["shared_name"] = value
36545	}
36546}
36547
36548// BatchBatchingQueue sets the optional batching_queue attribute to value.
36549// If not specified, defaults to ""
36550func BatchBatchingQueue(value string) BatchAttr {
36551	return func(m optionalAttr) {
36552		m["batching_queue"] = value
36553	}
36554}
36555
36556// Batches all input tensors nondeterministically.
36557//
36558// When many instances of this Op are being run concurrently with the same
36559// container/shared_name in the same device, some will output zero-shaped Tensors
36560// and others will output Tensors of size up to max_batch_size.
36561//
// All Tensors in in_tensors are batched together (so, for example, labels and
// features should be batched with a single instance of this operation).
36564//
36565// Each invocation of batch emits an `id` scalar which will be used to identify
36566// this particular invocation when doing unbatch or its gradient.
36567//
36568// Each op which emits a non-empty batch will also emit a non-empty batch_index
// Tensor, which is a [K, 3] matrix where each row contains the invocation's id,
36570// start, and length of elements of each set of Tensors present in batched_tensors.
36571//
36572// Batched tensors are concatenated along the first dimension, and all tensors in
// in_tensors must have the same size in the first dimension.
36574//
36575// in_tensors: The tensors to be batched.
36576// num_batch_threads: Number of scheduling threads for processing batches of work.
36577//  Determines the number of batches processed in parallel.
36578// max_batch_size: Batch sizes will never be bigger than this.
36579// batch_timeout_micros: Maximum number of microseconds to wait before outputting
36580//  an incomplete batch.
36581// allowed_batch_sizes: Optional list of allowed batch sizes. If left empty, does
36582//  nothing. Otherwise, supplies a list of batch sizes, causing the op to pad
36583//  batches up to one of those sizes. The entries must increase monotonically, and
36584//  the final entry must equal max_batch_size.
36585// grad_timeout_micros: The timeout to use for the gradient. See Unbatch.
36586// batched_tensors: Either empty tensors or a batch of concatenated Tensors.
36587// batch_index: If out_tensors is non-empty, has information to invert it.
36588// container: Controls the scope of sharing of this batch.
36589// id: always contains a scalar with a unique ID for this invocation of Batch.
36590// shared_name: Concurrently running instances of batch in the same device with the
36591//  same container and shared_name will batch their elements together. If left
36592//  empty, the op name will be used as the shared name.
36593// T: the types of tensors to be batched.
36594func Batch(scope *Scope, in_tensors []tf.Output, num_batch_threads int64, max_batch_size int64, batch_timeout_micros int64, grad_timeout_micros int64, optional ...BatchAttr) (batched_tensors []tf.Output, batch_index tf.Output, id tf.Output) {
36595	if scope.Err() != nil {
36596		return
36597	}
36598	attrs := map[string]interface{}{"num_batch_threads": num_batch_threads, "max_batch_size": max_batch_size, "batch_timeout_micros": batch_timeout_micros, "grad_timeout_micros": grad_timeout_micros}
36599	for _, a := range optional {
36600		a(attrs)
36601	}
36602	opspec := tf.OpSpec{
36603		Type: "Batch",
36604		Input: []tf.Input{
36605			tf.OutputList(in_tensors),
36606		},
36607		Attrs: attrs,
36608	}
36609	op := scope.AddOperation(opspec)
36610	if scope.Err() != nil {
36611		return
36612	}
36613	var idx int
36614	var err error
36615	if batched_tensors, idx, err = makeOutputList(op, idx, "batched_tensors"); err != nil {
36616		scope.UpdateErr("Batch", err)
36617		return
36618	}
	batch_index = op.Output(idx)
	id = op.Output(idx + 1)
36621	return batched_tensors, batch_index, id
36622}
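
// Editor-added sketch, not generated code (the helper name and parameter
// values are hypothetical): batch a single feature tensor with 4 scheduling
// threads, a maximum batch size of 32 and 5ms timeouts, padding batches to
// 8, 16 or 32 elements (the final allowed size must equal max_batch_size).
func exampleBatch(s *Scope, features tf.Output) ([]tf.Output, tf.Output, tf.Output) {
	return Batch(s, []tf.Output{features}, 4, 32, 5000, 5000,
		BatchAllowedBatchSizes([]int64{8, 16, 32}))
}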
36623
36624// Adjust the hue of one or more images.
36625//
36626// `images` is a tensor of at least 3 dimensions.  The last dimension is
// interpreted as channels, and must be three.
36628//
36629// The input image is considered in the RGB colorspace. Conceptually, the RGB
// colors are first mapped into HSV. A delta is then applied to all the hue
// values, which are then mapped back to the RGB colorspace.
36632//
36633// Arguments:
36634//	images: Images to adjust.  At least 3-D.
36635//	delta: A float delta to add to the hue.
36636//
36637// Returns The hue-adjusted image or images.
36638func AdjustHue(scope *Scope, images tf.Output, delta tf.Output) (output tf.Output) {
36639	if scope.Err() != nil {
36640		return
36641	}
36642	opspec := tf.OpSpec{
36643		Type: "AdjustHue",
36644		Input: []tf.Input{
36645			images, delta,
36646		},
36647	}
36648	op := scope.AddOperation(opspec)
36649	return op.Output(0)
36650}
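
// Editor-added sketch, not generated code (the helper name is hypothetical;
// Const is assumed to be this package's constant-tensor helper): rotate all
// hues by a third of the hue circle, mapping pure red to pure green.
func exampleAdjustHue(s *Scope, images tf.Output) tf.Output {
	return AdjustHue(s, images, Const(s, float32(1.0/3.0)))
}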
36651
36652// ResizeBicubicGradAttr is an optional argument to ResizeBicubicGrad.
36653type ResizeBicubicGradAttr func(optionalAttr)
36654
36655// ResizeBicubicGradAlignCorners sets the optional align_corners attribute to value.
36656//
36657// value: If true, the centers of the 4 corner pixels of the input and grad tensors are
36658// aligned. Defaults to false.
36659// If not specified, defaults to false
36660func ResizeBicubicGradAlignCorners(value bool) ResizeBicubicGradAttr {
36661	return func(m optionalAttr) {
36662		m["align_corners"] = value
36663	}
36664}
36665
36666// ResizeBicubicGradHalfPixelCenters sets the optional half_pixel_centers attribute to value.
36667// If not specified, defaults to false
36668func ResizeBicubicGradHalfPixelCenters(value bool) ResizeBicubicGradAttr {
36669	return func(m optionalAttr) {
36670		m["half_pixel_centers"] = value
36671	}
36672}
36673
36674// Computes the gradient of bicubic interpolation.
36675//
36676// Arguments:
36677//	grads: 4-D with shape `[batch, height, width, channels]`.
36678//	original_image: 4-D with shape `[batch, orig_height, orig_width, channels]`,
36679// The image tensor that was resized.
36680//
36681// Returns 4-D with shape `[batch, orig_height, orig_width, channels]`.
36682// Gradients with respect to the input image. Input image must have been
36683// float or double.
36684func ResizeBicubicGrad(scope *Scope, grads tf.Output, original_image tf.Output, optional ...ResizeBicubicGradAttr) (output tf.Output) {
36685	if scope.Err() != nil {
36686		return
36687	}
36688	attrs := map[string]interface{}{}
36689	for _, a := range optional {
36690		a(attrs)
36691	}
36692	opspec := tf.OpSpec{
36693		Type: "ResizeBicubicGrad",
36694		Input: []tf.Input{
36695			grads, original_image,
36696		},
36697		Attrs: attrs,
36698	}
36699	op := scope.AddOperation(opspec)
36700	return op.Output(0)
36701}
36702
36703// ResizeNearestNeighborAttr is an optional argument to ResizeNearestNeighbor.
36704type ResizeNearestNeighborAttr func(optionalAttr)
36705
36706// ResizeNearestNeighborAlignCorners sets the optional align_corners attribute to value.
36707//
36708// value: If true, the centers of the 4 corner pixels of the input and output tensors are
36709// aligned, preserving the values at the corner pixels. Defaults to false.
36710// If not specified, defaults to false
36711func ResizeNearestNeighborAlignCorners(value bool) ResizeNearestNeighborAttr {
36712	return func(m optionalAttr) {
36713		m["align_corners"] = value
36714	}
36715}
36716
36717// ResizeNearestNeighborHalfPixelCenters sets the optional half_pixel_centers attribute to value.
36718// If not specified, defaults to false
36719func ResizeNearestNeighborHalfPixelCenters(value bool) ResizeNearestNeighborAttr {
36720	return func(m optionalAttr) {
36721		m["half_pixel_centers"] = value
36722	}
36723}
36724
36725// Resize `images` to `size` using nearest neighbor interpolation.
36726//
36727// Arguments:
36728//	images: 4-D with shape `[batch, height, width, channels]`.
//	size: A 1-D int32 Tensor of 2 elements: `new_height, new_width`.  The
36730// new size for the images.
36731//
36732// Returns 4-D with shape
36733// `[batch, new_height, new_width, channels]`.
36734func ResizeNearestNeighbor(scope *Scope, images tf.Output, size tf.Output, optional ...ResizeNearestNeighborAttr) (resized_images tf.Output) {
36735	if scope.Err() != nil {
36736		return
36737	}
36738	attrs := map[string]interface{}{}
36739	for _, a := range optional {
36740		a(attrs)
36741	}
36742	opspec := tf.OpSpec{
36743		Type: "ResizeNearestNeighbor",
36744		Input: []tf.Input{
36745			images, size,
36746		},
36747		Attrs: attrs,
36748	}
36749	op := scope.AddOperation(opspec)
36750	return op.Output(0)
36751}
36752
36753// ResizeNearestNeighborGradAttr is an optional argument to ResizeNearestNeighborGrad.
36754type ResizeNearestNeighborGradAttr func(optionalAttr)
36755
36756// ResizeNearestNeighborGradAlignCorners sets the optional align_corners attribute to value.
36757//
36758// value: If true, the centers of the 4 corner pixels of the input and grad tensors are
36759// aligned. Defaults to false.
36760// If not specified, defaults to false
36761func ResizeNearestNeighborGradAlignCorners(value bool) ResizeNearestNeighborGradAttr {
36762	return func(m optionalAttr) {
36763		m["align_corners"] = value
36764	}
36765}
36766
36767// ResizeNearestNeighborGradHalfPixelCenters sets the optional half_pixel_centers attribute to value.
36768// If not specified, defaults to false
36769func ResizeNearestNeighborGradHalfPixelCenters(value bool) ResizeNearestNeighborGradAttr {
36770	return func(m optionalAttr) {
36771		m["half_pixel_centers"] = value
36772	}
36773}
36774
36775// Computes the gradient of nearest neighbor interpolation.
36776//
36777// Arguments:
36778//	grads: 4-D with shape `[batch, height, width, channels]`.
//	size: A 1-D int32 Tensor of 2 elements: `orig_height, orig_width`. The
36780// original input size.
36781//
36782// Returns 4-D with shape `[batch, orig_height, orig_width, channels]`. Gradients
36783// with respect to the input image.
36784func ResizeNearestNeighborGrad(scope *Scope, grads tf.Output, size tf.Output, optional ...ResizeNearestNeighborGradAttr) (output tf.Output) {
36785	if scope.Err() != nil {
36786		return
36787	}
36788	attrs := map[string]interface{}{}
36789	for _, a := range optional {
36790		a(attrs)
36791	}
36792	opspec := tf.OpSpec{
36793		Type: "ResizeNearestNeighborGrad",
36794		Input: []tf.Input{
36795			grads, size,
36796		},
36797		Attrs: attrs,
36798	}
36799	op := scope.AddOperation(opspec)
36800	return op.Output(0)
36801}
36802
36803// ExtractJpegShapeAttr is an optional argument to ExtractJpegShape.
36804type ExtractJpegShapeAttr func(optionalAttr)
36805
36806// ExtractJpegShapeOutputType sets the optional output_type attribute to value.
36807//
36808// value: (Optional) The output type of the operation (int32 or int64).
36809// Defaults to int32.
36810// If not specified, defaults to DT_INT32
36811func ExtractJpegShapeOutputType(value tf.DataType) ExtractJpegShapeAttr {
36812	return func(m optionalAttr) {
36813		m["output_type"] = value
36814	}
36815}
36816
36817// Extract the shape information of a JPEG-encoded image.
36818//
36819// This op only parses the image header, so it is much faster than DecodeJpeg.
36820//
36821// Arguments:
36822//	contents: 0-D. The JPEG-encoded image.
36823//
36824// Returns 1-D. The image shape with format [height, width, channels].
36825func ExtractJpegShape(scope *Scope, contents tf.Output, optional ...ExtractJpegShapeAttr) (image_shape tf.Output) {
36826	if scope.Err() != nil {
36827		return
36828	}
36829	attrs := map[string]interface{}{}
36830	for _, a := range optional {
36831		a(attrs)
36832	}
36833	opspec := tf.OpSpec{
36834		Type: "ExtractJpegShape",
36835		Input: []tf.Input{
36836			contents,
36837		},
36838		Attrs: attrs,
36839	}
36840	op := scope.AddOperation(opspec)
36841	return op.Output(0)
36842}
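
// Editor-added sketch, not generated code (the helper name is hypothetical;
// tf.Int64 is assumed to be the int64 DataType constant): read only the JPEG
// header and return the shape as int64.
func exampleExtractJpegShape(s *Scope, contents tf.Output) tf.Output {
	return ExtractJpegShape(s, contents, ExtractJpegShapeOutputType(tf.Int64))
}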
36843
36844// DecodePngAttr is an optional argument to DecodePng.
36845type DecodePngAttr func(optionalAttr)
36846
36847// DecodePngChannels sets the optional channels attribute to value.
36848//
36849// value: Number of color channels for the decoded image.
36850// If not specified, defaults to 0
36851func DecodePngChannels(value int64) DecodePngAttr {
36852	return func(m optionalAttr) {
36853		m["channels"] = value
36854	}
36855}
36856
36857// DecodePngDtype sets the optional dtype attribute to value.
36858// If not specified, defaults to DT_UINT8
36859func DecodePngDtype(value tf.DataType) DecodePngAttr {
36860	return func(m optionalAttr) {
36861		m["dtype"] = value
36862	}
36863}
36864
36865// Decode a PNG-encoded image to a uint8 or uint16 tensor.
36866//
36867// The attr `channels` indicates the desired number of color channels for the
36868// decoded image.
36869//
36870// Accepted values are:
36871//
36872// *   0: Use the number of channels in the PNG-encoded image.
36873// *   1: output a grayscale image.
36874// *   3: output an RGB image.
36875// *   4: output an RGBA image.
36876//
36877// If needed, the PNG-encoded image is transformed to match the requested number
36878// of color channels.
36879//
36880// This op also supports decoding JPEGs and non-animated GIFs since the interface
36881// is the same, though it is cleaner to use `tf.image.decode_image`.
36882//
36883// Arguments:
36884//	contents: 0-D.  The PNG-encoded image.
36885//
36886// Returns 3-D with shape `[height, width, channels]`.
36887func DecodePng(scope *Scope, contents tf.Output, optional ...DecodePngAttr) (image tf.Output) {
36888	if scope.Err() != nil {
36889		return
36890	}
36891	attrs := map[string]interface{}{}
36892	for _, a := range optional {
36893		a(attrs)
36894	}
36895	opspec := tf.OpSpec{
36896		Type: "DecodePng",
36897		Input: []tf.Input{
36898			contents,
36899		},
36900		Attrs: attrs,
36901	}
36902	op := scope.AddOperation(opspec)
36903	return op.Output(0)
36904}
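
// Editor-added sketch, not generated code (the helper name is hypothetical;
// tf.Uint16 is assumed to be the uint16 DataType constant): force a 16-bit
// RGBA decode regardless of the channels stored in the file.
func exampleDecodePng(s *Scope, contents tf.Output) tf.Output {
	return DecodePng(s, contents,
		DecodePngChannels(4),
		DecodePngDtype(tf.Uint16))
}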
36905
36906// Decode the first frame of a GIF-encoded image to a uint8 tensor.
36907//
// GIFs with frame or transparency compression are not supported; convert
// animated GIFs from compressed to uncompressed with:
36910//
36911//     convert $src.gif -coalesce $dst.gif
36912//
36913// This op also supports decoding JPEGs and PNGs, though it is cleaner to use
36914// `tf.image.decode_image`.
36915//
36916// Arguments:
36917//	contents: 0-D.  The GIF-encoded image.
36918//
36919// Returns 4-D with shape `[num_frames, height, width, 3]`. RGB order
36920func DecodeGif(scope *Scope, contents tf.Output) (image tf.Output) {
36921	if scope.Err() != nil {
36922		return
36923	}
36924	opspec := tf.OpSpec{
36925		Type: "DecodeGif",
36926		Input: []tf.Input{
36927			contents,
36928		},
36929	}
36930	op := scope.AddOperation(opspec)
36931	return op.Output(0)
36932}
36933
36934// LearnedUnigramCandidateSamplerAttr is an optional argument to LearnedUnigramCandidateSampler.
36935type LearnedUnigramCandidateSamplerAttr func(optionalAttr)
36936
36937// LearnedUnigramCandidateSamplerSeed sets the optional seed attribute to value.
36938//
36939// value: If either seed or seed2 are set to be non-zero, the random number
36940// generator is seeded by the given seed.  Otherwise, it is seeded by a
36941// random seed.
36942// If not specified, defaults to 0
36943func LearnedUnigramCandidateSamplerSeed(value int64) LearnedUnigramCandidateSamplerAttr {
36944	return func(m optionalAttr) {
36945		m["seed"] = value
36946	}
36947}
36948
36949// LearnedUnigramCandidateSamplerSeed2 sets the optional seed2 attribute to value.
36950//
// value: A second seed to avoid seed collision.
36952// If not specified, defaults to 0
36953func LearnedUnigramCandidateSamplerSeed2(value int64) LearnedUnigramCandidateSamplerAttr {
36954	return func(m optionalAttr) {
36955		m["seed2"] = value
36956	}
36957}
36958
36959// Generates labels for candidate sampling with a learned unigram distribution.
36960//
36961// See explanations of candidate sampling and the data formats at
36962// go/candidate-sampling.
36963//
36964// For each batch, this op picks a single set of sampled candidate labels.
36965//
36966// The advantages of sampling candidates per-batch are simplicity and the
36967// possibility of efficient dense matrix multiplication. The disadvantage is that
36968// the sampled candidates must be chosen independently of the context and of the
36969// true labels.
36970//
36971// Arguments:
36972//	true_classes: A batch_size * num_true matrix, in which each row contains the
36973// IDs of the num_true target_classes in the corresponding original label.
36974//	num_true: Number of true labels per context.
36975//	num_sampled: Number of candidates to randomly sample.
36976//	unique: If unique is true, we sample with rejection, so that all sampled
36977// candidates in a batch are unique. This requires some approximation to
36978// estimate the post-rejection sampling probabilities.
36979//	range_max: The sampler will sample integers from the interval [0, range_max).
36980//
// Returns:
//	sampled_candidates: A vector of length num_sampled, in which each element is
// the ID of a sampled candidate.
//	true_expected_count: A batch_size * num_true matrix, representing
// the number of times each candidate is expected to occur in a batch
// of sampled candidates. If unique=true, then this is a probability.
//	sampled_expected_count: A vector of length num_sampled, for each sampled
// candidate representing the number of times the candidate is expected
// to occur in a batch of sampled candidates.  If unique=true, then this is a
// probability.
36988func LearnedUnigramCandidateSampler(scope *Scope, true_classes tf.Output, num_true int64, num_sampled int64, unique bool, range_max int64, optional ...LearnedUnigramCandidateSamplerAttr) (sampled_candidates tf.Output, true_expected_count tf.Output, sampled_expected_count tf.Output) {
36989	if scope.Err() != nil {
36990		return
36991	}
36992	attrs := map[string]interface{}{"num_true": num_true, "num_sampled": num_sampled, "unique": unique, "range_max": range_max}
36993	for _, a := range optional {
36994		a(attrs)
36995	}
36996	opspec := tf.OpSpec{
36997		Type: "LearnedUnigramCandidateSampler",
36998		Input: []tf.Input{
36999			true_classes,
37000		},
37001		Attrs: attrs,
37002	}
37003	op := scope.AddOperation(opspec)
37004	return op.Output(0), op.Output(1), op.Output(2)
37005}
37006
37007// RandomShuffleQueueV2Attr is an optional argument to RandomShuffleQueueV2.
37008type RandomShuffleQueueV2Attr func(optionalAttr)
37009
37010// RandomShuffleQueueV2Shapes sets the optional shapes attribute to value.
37011//
37012// value: The shape of each component in a value. The length of this attr must
37013// be either 0 or the same as the length of component_types. If the length of
37014// this attr is 0, the shapes of queue elements are not constrained, and
37015// only one element may be dequeued at a time.
// If not specified, defaults to []
37017//
37018// REQUIRES: len(value) >= 0
37019func RandomShuffleQueueV2Shapes(value []tf.Shape) RandomShuffleQueueV2Attr {
37020	return func(m optionalAttr) {
37021		m["shapes"] = value
37022	}
37023}
37024
37025// RandomShuffleQueueV2Capacity sets the optional capacity attribute to value.
37026//
37027// value: The upper bound on the number of elements in this queue.
37028// Negative numbers mean no limit.
37029// If not specified, defaults to -1
37030func RandomShuffleQueueV2Capacity(value int64) RandomShuffleQueueV2Attr {
37031	return func(m optionalAttr) {
37032		m["capacity"] = value
37033	}
37034}
37035
37036// RandomShuffleQueueV2MinAfterDequeue sets the optional min_after_dequeue attribute to value.
37037//
37038// value: Dequeue will block unless there would be this
37039// many elements after the dequeue or the queue is closed. This
37040// ensures a minimum level of mixing of elements.
37041// If not specified, defaults to 0
37042func RandomShuffleQueueV2MinAfterDequeue(value int64) RandomShuffleQueueV2Attr {
37043	return func(m optionalAttr) {
37044		m["min_after_dequeue"] = value
37045	}
37046}
37047
37048// RandomShuffleQueueV2Seed sets the optional seed attribute to value.
37049//
37050// value: If either seed or seed2 is set to be non-zero, the random number
37051// generator is seeded by the given seed.  Otherwise, a random seed is used.
37052// If not specified, defaults to 0
37053func RandomShuffleQueueV2Seed(value int64) RandomShuffleQueueV2Attr {
37054	return func(m optionalAttr) {
37055		m["seed"] = value
37056	}
37057}
37058
37059// RandomShuffleQueueV2Seed2 sets the optional seed2 attribute to value.
37060//
37061// value: A second seed to avoid seed collision.
37062// If not specified, defaults to 0
37063func RandomShuffleQueueV2Seed2(value int64) RandomShuffleQueueV2Attr {
37064	return func(m optionalAttr) {
37065		m["seed2"] = value
37066	}
37067}
37068
37069// RandomShuffleQueueV2Container sets the optional container attribute to value.
37070//
37071// value: If non-empty, this queue is placed in the given container.
37072// Otherwise, a default container is used.
37073// If not specified, defaults to ""
37074func RandomShuffleQueueV2Container(value string) RandomShuffleQueueV2Attr {
37075	return func(m optionalAttr) {
37076		m["container"] = value
37077	}
37078}
37079
37080// RandomShuffleQueueV2SharedName sets the optional shared_name attribute to value.
37081//
37082// value: If non-empty, this queue will be shared under the given name
37083// across multiple sessions.
37084// If not specified, defaults to ""
37085func RandomShuffleQueueV2SharedName(value string) RandomShuffleQueueV2Attr {
37086	return func(m optionalAttr) {
37087		m["shared_name"] = value
37088	}
37089}
37090
37091// A queue that randomizes the order of elements.
37092//
37093// Arguments:
37094//	component_types: The type of each component in a value.
37095//
37096// Returns The handle to the queue.
37097func RandomShuffleQueueV2(scope *Scope, component_types []tf.DataType, optional ...RandomShuffleQueueV2Attr) (handle tf.Output) {
37098	if scope.Err() != nil {
37099		return
37100	}
37101	attrs := map[string]interface{}{"component_types": component_types}
37102	for _, a := range optional {
37103		a(attrs)
37104	}
37105	opspec := tf.OpSpec{
37106		Type: "RandomShuffleQueueV2",
37107
37108		Attrs: attrs,
37109	}
37110	op := scope.AddOperation(opspec)
37111	return op.Output(0)
37112}
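
// Editor-added sketch, not generated code (the helper name and capacities are
// hypothetical; tf.String is assumed to be the string DataType constant): a
// string queue holding at most 1000 elements that keeps at least 100 queued
// after each dequeue so elements stay well mixed.
func exampleRandomShuffleQueueV2(s *Scope) tf.Output {
	return RandomShuffleQueueV2(s, []tf.DataType{tf.String},
		RandomShuffleQueueV2Capacity(1000),
		RandomShuffleQueueV2MinAfterDequeue(100))
}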
37113
37114// SerializeSparseAttr is an optional argument to SerializeSparse.
37115type SerializeSparseAttr func(optionalAttr)
37116
37117// SerializeSparseOutType sets the optional out_type attribute to value.
37118//
37119// value: The `dtype` to use for serialization; the supported types are `string`
37120// (default) and `variant`.
37121// If not specified, defaults to DT_STRING
37122func SerializeSparseOutType(value tf.DataType) SerializeSparseAttr {
37123	return func(m optionalAttr) {
37124		m["out_type"] = value
37125	}
37126}
37127
37128// Serialize a `SparseTensor` into a `[3]` `Tensor` object.
37129//
37130// Arguments:
37131//	sparse_indices: 2-D.  The `indices` of the `SparseTensor`.
37132//	sparse_values: 1-D.  The `values` of the `SparseTensor`.
37133//	sparse_shape: 1-D.  The `shape` of the `SparseTensor`.
37134func SerializeSparse(scope *Scope, sparse_indices tf.Output, sparse_values tf.Output, sparse_shape tf.Output, optional ...SerializeSparseAttr) (serialized_sparse tf.Output) {
37135	if scope.Err() != nil {
37136		return
37137	}
37138	attrs := map[string]interface{}{}
37139	for _, a := range optional {
37140		a(attrs)
37141	}
37142	opspec := tf.OpSpec{
37143		Type: "SerializeSparse",
37144		Input: []tf.Input{
37145			sparse_indices, sparse_values, sparse_shape,
37146		},
37147		Attrs: attrs,
37148	}
37149	op := scope.AddOperation(opspec)
37150	return op.Output(0)
37151}
37152
37153// Draw bounding boxes on a batch of images.
37154//
37155// Outputs a copy of `images` but draws on top of the pixels zero or more bounding
// boxes specified by the locations in `boxes`. The coordinates of each
37157// bounding box in `boxes` are encoded as `[y_min, x_min, y_max, x_max]`. The
37158// bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and
37159// height of the underlying image.
37160//
37161// For example, if an image is 100 x 200 pixels (height x width) and the bounding
37162// box is `[0.1, 0.2, 0.5, 0.9]`, the upper-left and bottom-right coordinates of
37163// the bounding box will be `(40, 10)` to `(180, 50)` (in (x,y) coordinates).
37164//
37165// Parts of the bounding box may fall outside the image.
37166//
37167// Arguments:
37168//	images: 4-D with shape `[batch, height, width, depth]`. A batch of images.
37169//	boxes: 3-D with shape `[batch, num_bounding_boxes, 4]` containing bounding
37170// boxes.
37171//
37172// Returns 4-D with the same shape as `images`. The batch of input images with
37173// bounding boxes drawn on the images.
37174func DrawBoundingBoxes(scope *Scope, images tf.Output, boxes tf.Output) (output tf.Output) {
37175	if scope.Err() != nil {
37176		return
37177	}
37178	opspec := tf.OpSpec{
37179		Type: "DrawBoundingBoxes",
37180		Input: []tf.Input{
37181			images, boxes,
37182		},
37183	}
37184	op := scope.AddOperation(opspec)
37185	return op.Output(0)
37186}
37187
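// Hand-written sketch (not part of the generated API): drawing one normalized
// [y_min, x_min, y_max, x_max] box on a single 4x4 RGB image of zeros. The
// image size and box coordinates are illustrative assumptions.
func exampleDrawBoundingBoxes(s *Scope) tf.Output {
	dims := Const(s.SubScope("dims"), []int32{1, 4, 4, 3}) // [batch, height, width, depth]
	zero := Const(s.SubScope("zero"), float32(0))
	images := Fill(s.SubScope("images"), dims, zero)
	boxes := Const(s.SubScope("boxes"), [][][]float32{{{0.1, 0.2, 0.5, 0.9}}})
	return DrawBoundingBoxes(s, images, boxes)
}
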
37188// SampleDistortedBoundingBoxV2Attr is an optional argument to SampleDistortedBoundingBoxV2.
37189type SampleDistortedBoundingBoxV2Attr func(optionalAttr)
37190
37191// SampleDistortedBoundingBoxV2Seed sets the optional seed attribute to value.
37192//
37193// value: If either `seed` or `seed2` are set to non-zero, the random number
37194// generator is seeded by the given `seed`.  Otherwise, it is seeded by a random
37195// seed.
37196// If not specified, defaults to 0
37197func SampleDistortedBoundingBoxV2Seed(value int64) SampleDistortedBoundingBoxV2Attr {
37198	return func(m optionalAttr) {
37199		m["seed"] = value
37200	}
37201}
37202
37203// SampleDistortedBoundingBoxV2Seed2 sets the optional seed2 attribute to value.
37204//
37205// value: A second seed to avoid seed collision.
37206// If not specified, defaults to 0
37207func SampleDistortedBoundingBoxV2Seed2(value int64) SampleDistortedBoundingBoxV2Attr {
37208	return func(m optionalAttr) {
37209		m["seed2"] = value
37210	}
37211}
37212
37213// SampleDistortedBoundingBoxV2AspectRatioRange sets the optional aspect_ratio_range attribute to value.
37214//
37215// value: The cropped area of the image must have an aspect ratio =
37216// width / height within this range.
37217// If not specified, defaults to [0.75, 1.33]
37218func SampleDistortedBoundingBoxV2AspectRatioRange(value []float32) SampleDistortedBoundingBoxV2Attr {
37219	return func(m optionalAttr) {
37220		m["aspect_ratio_range"] = value
37221	}
37222}
37223
37224// SampleDistortedBoundingBoxV2AreaRange sets the optional area_range attribute to value.
37225//
37226// value: The cropped area of the image must contain a fraction of the
37227// supplied image within this range.
37228// If not specified, defaults to [0.05, 1]
37229func SampleDistortedBoundingBoxV2AreaRange(value []float32) SampleDistortedBoundingBoxV2Attr {
37230	return func(m optionalAttr) {
37231		m["area_range"] = value
37232	}
37233}
37234
37235// SampleDistortedBoundingBoxV2MaxAttempts sets the optional max_attempts attribute to value.
37236//
37237// value: Number of attempts at generating a cropped region of the image
37238// of the specified constraints. After `max_attempts` failures, return the entire
37239// image.
37240// If not specified, defaults to 100
37241func SampleDistortedBoundingBoxV2MaxAttempts(value int64) SampleDistortedBoundingBoxV2Attr {
37242	return func(m optionalAttr) {
37243		m["max_attempts"] = value
37244	}
37245}
37246
37247// SampleDistortedBoundingBoxV2UseImageIfNoBoundingBoxes sets the optional use_image_if_no_bounding_boxes attribute to value.
37248//
37249// value: Controls behavior if no bounding boxes are supplied.
37250// If true, assume an implicit bounding box covering the whole input. If false,
37251// raise an error.
37252// If not specified, defaults to false
37253func SampleDistortedBoundingBoxV2UseImageIfNoBoundingBoxes(value bool) SampleDistortedBoundingBoxV2Attr {
37254	return func(m optionalAttr) {
37255		m["use_image_if_no_bounding_boxes"] = value
37256	}
37257}
37258
37259// Generate a single randomly distorted bounding box for an image.
37260//
37261// Bounding box annotations are often supplied in addition to ground-truth labels
37262// in image recognition or object localization tasks. A common technique for
37263// training such a system is to randomly distort an image while preserving
37264// its content, i.e. *data augmentation*. This Op outputs a randomly distorted
37265// localization of an object, i.e. bounding box, given an `image_size`,
37266// `bounding_boxes` and a series of constraints.
37267//
37268// The output of this Op is a single bounding box that may be used to crop the
37269// original image. The output is returned as 3 tensors: `begin`, `size` and
37270// `bboxes`. The first 2 tensors can be fed directly into `tf.slice` to crop the
37271// image. The latter may be supplied to `tf.image.draw_bounding_boxes` to visualize
37272// what the bounding box looks like.
37273//
37274// Bounding boxes are supplied and returned as `[y_min, x_min, y_max, x_max]`. The
37275// bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and
37276// height of the underlying image.
37277//
37278// For example,
37279//
37280// ```python
37281//     # Generate a single distorted bounding box.
37282//     begin, size, bbox_for_draw = tf.image.sample_distorted_bounding_box(
37283//         tf.shape(image),
37284//         bounding_boxes=bounding_boxes)
37285//
37286//     # Draw the bounding box in an image summary.
37287//     image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0),
37288//                                                   bbox_for_draw)
37289//     tf.summary.image('images_with_box', image_with_box)
37290//
37291//     # Employ the bounding box to distort the image.
37292//     distorted_image = tf.slice(image, begin, size)
37293// ```
37294//
37295// Note that if no bounding box information is available, setting
37296// `use_image_if_no_bounding_boxes = true` will assume there is a single implicit
37297// bounding box covering the whole image. If `use_image_if_no_bounding_boxes` is
37298// false and no bounding boxes are supplied, an error is raised.
37299//
37300// Arguments:
37301//	image_size: 1-D, containing `[height, width, channels]`.
37302//	bounding_boxes: 3-D with shape `[batch, N, 4]` describing the N bounding boxes
37303// associated with the image.
37304//	min_object_covered: The cropped area of the image must contain at least this
37305// fraction of any bounding box supplied. The value of this parameter should be
37306// non-negative. In the case of 0, the cropped area does not need to overlap
37307// any of the bounding boxes supplied.
37308//
37309// Returns 1-D, containing `[offset_height, offset_width, 0]` (provide as input to
37310// `tf.slice`); 1-D, containing `[target_height, target_width, -1]` (provide as input to
37311// `tf.slice`); and 3-D with shape `[1, 1, 4]` containing the distorted bounding box
37312// (provide as input to `tf.image.draw_bounding_boxes`).
37313func SampleDistortedBoundingBoxV2(scope *Scope, image_size tf.Output, bounding_boxes tf.Output, min_object_covered tf.Output, optional ...SampleDistortedBoundingBoxV2Attr) (begin tf.Output, size tf.Output, bboxes tf.Output) {
37314	if scope.Err() != nil {
37315		return
37316	}
37317	attrs := map[string]interface{}{}
37318	for _, a := range optional {
37319		a(attrs)
37320	}
37321	opspec := tf.OpSpec{
37322		Type: "SampleDistortedBoundingBoxV2",
37323		Input: []tf.Input{
37324			image_size, bounding_boxes, min_object_covered,
37325		},
37326		Attrs: attrs,
37327	}
37328	op := scope.AddOperation(opspec)
37329	return op.Output(0), op.Output(1), op.Output(2)
37330}
37331
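// Hand-written sketch (not part of the generated API): a Go analogue of the
// Python example above. `image` is assumed to be a 3-D float tensor already in
// the graph; the bounding box, coverage fraction, and seed are illustrative
// assumptions.
func exampleSampleDistortedBoundingBoxV2(s *Scope, image tf.Output) (distorted tf.Output, withBox tf.Output) {
	imageSize := Shape(s.SubScope("shape"), image)
	bboxes := Const(s.SubScope("bboxes"), [][][]float32{{{0.0, 0.0, 1.0, 1.0}}})
	minCovered := Const(s.SubScope("min_covered"), float32(0.1))
	begin, size, bboxForDraw := SampleDistortedBoundingBoxV2(s, imageSize, bboxes, minCovered,
		SampleDistortedBoundingBoxV2Seed(42)) // fixed seed for reproducibility
	// Visualize the sampled box on a batch of one, then crop the image to it.
	batched := ExpandDims(s.SubScope("expand"), image, Const(s.SubScope("axis"), int32(0)))
	withBox = DrawBoundingBoxes(s.SubScope("draw"), batched, bboxForDraw)
	distorted = Slice(s.SubScope("slice"), image, begin, size)
	return distorted, withBox
}
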
37332// Computes requantization range per channel.
37333//
37334// Arguments:
37335//	input: The original input tensor.
37336//	input_min: The minimum value of the input tensor.
37337//	input_max: The maximum value of the input tensor.
37338//	clip_value_max: The maximum value of the output that needs to be clipped.
37339// Example: set this to 6 for Relu6.
37340//
37341// Returns The minimum value of the final output tensor and the maximum value of the final output tensor.
37342func RequantizationRangePerChannel(scope *Scope, input tf.Output, input_min tf.Output, input_max tf.Output, clip_value_max float32) (output_min tf.Output, output_max tf.Output) {
37343	if scope.Err() != nil {
37344		return
37345	}
37346	attrs := map[string]interface{}{"clip_value_max": clip_value_max}
37347	opspec := tf.OpSpec{
37348		Type: "RequantizationRangePerChannel",
37349		Input: []tf.Input{
37350			input, input_min, input_max,
37351		},
37352		Attrs: attrs,
37353	}
37354	op := scope.AddOperation(opspec)
37355	return op.Output(0), op.Output(1)
37356}
37357
37358// ExtractGlimpseAttr is an optional argument to ExtractGlimpse.
37359type ExtractGlimpseAttr func(optionalAttr)
37360
37361// ExtractGlimpseCentered sets the optional centered attribute to value.
37362//
37363// value: indicates if the offset coordinates are centered relative to
37364// the image, in which case the (0, 0) offset is relative to the center
37365// of the input images. If false, the (0,0) offset corresponds to the
37366// upper left corner of the input images.
37367// If not specified, defaults to true
37368func ExtractGlimpseCentered(value bool) ExtractGlimpseAttr {
37369	return func(m optionalAttr) {
37370		m["centered"] = value
37371	}
37372}
37373
37374// ExtractGlimpseNormalized sets the optional normalized attribute to value.
37375//
37376// value: indicates if the offset coordinates are normalized.
37377// If not specified, defaults to true
37378func ExtractGlimpseNormalized(value bool) ExtractGlimpseAttr {
37379	return func(m optionalAttr) {
37380		m["normalized"] = value
37381	}
37382}
37383
37384// ExtractGlimpseUniformNoise sets the optional uniform_noise attribute to value.
37385//
37386// value: indicates if the noise should be generated using a
37387// uniform distribution or a Gaussian distribution.
37388// If not specified, defaults to true
37389func ExtractGlimpseUniformNoise(value bool) ExtractGlimpseAttr {
37390	return func(m optionalAttr) {
37391		m["uniform_noise"] = value
37392	}
37393}
37394
37395// ExtractGlimpseNoise sets the optional noise attribute to value.
37396//
37397// value: indicates if the noise should be `uniform`, `gaussian`, or
37398// `zero`. The default is `uniform`, which means the noise type
37399// will be decided by `uniform_noise`.
37400// If not specified, defaults to "uniform"
37401func ExtractGlimpseNoise(value string) ExtractGlimpseAttr {
37402	return func(m optionalAttr) {
37403		m["noise"] = value
37404	}
37405}
37406
37407// Extracts a glimpse from the input tensor.
37408//
37409// Returns a set of windows called glimpses extracted at location
37410// `offsets` from the input tensor. If the windows only partially
37411// overlap the input, the non-overlapping areas will be filled with
37412// random noise.
37413//
37414// The result is a 4-D tensor of shape `[batch_size, glimpse_height,
37415// glimpse_width, channels]`. The channels and batch dimensions are the
37416// same as that of the input tensor. The height and width of the output
37417// windows are specified in the `size` parameter.
37418//
37419// The arguments `normalized` and `centered` control how the windows are built:
37420//
37421// * If the coordinates are normalized but not centered, 0.0 and 1.0
37422//   correspond to the minimum and maximum of each height and width
37423//   dimension.
37424// * If the coordinates are both normalized and centered, they range from
37425//   -1.0 to 1.0. The coordinates (-1.0, -1.0) correspond to the upper
37426//   left corner, the lower right corner is located at (1.0, 1.0) and the
37427//   center is at (0, 0).
37428// * If the coordinates are not normalized they are interpreted as
37429//   numbers of pixels.
37430//
37431// Arguments:
37432//	input: A 4-D float tensor of shape `[batch_size, height, width, channels]`.
37433//	size: A 1-D tensor of 2 elements containing the size of the glimpses
37434// to extract.  The glimpse height must be specified first, followed
37435// by the glimpse width.
37436//	offsets: A 2-D integer tensor of shape `[batch_size, 2]` containing
37437// the y, x locations of the center of each window.
37438//
37439// Returns A tensor representing the glimpses `[batch_size,
37440// glimpse_height, glimpse_width, channels]`.
37441func ExtractGlimpse(scope *Scope, input tf.Output, size tf.Output, offsets tf.Output, optional ...ExtractGlimpseAttr) (glimpse tf.Output) {
37442	if scope.Err() != nil {
37443		return
37444	}
37445	attrs := map[string]interface{}{}
37446	for _, a := range optional {
37447		a(attrs)
37448	}
37449	opspec := tf.OpSpec{
37450		Type: "ExtractGlimpse",
37451		Input: []tf.Input{
37452			input, size, offsets,
37453		},
37454		Attrs: attrs,
37455	}
37456	op := scope.AddOperation(opspec)
37457	return op.Output(0)
37458}
37459
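// Hand-written sketch (not part of the generated API): extracting one 8x8
// glimpse per image. With `centered` and `normalized` left at their defaults
// (true), a (0, 0) offset targets the image center. The sizes, offsets, and
// noise choice are illustrative assumptions.
func exampleExtractGlimpse(s *Scope, images tf.Output) tf.Output {
	size := Const(s.SubScope("size"), []int32{8, 8})             // glimpse height, then width
	offsets := Const(s.SubScope("offsets"), [][]float32{{0, 0}}) // one (y, x) offset per image in the batch
	return ExtractGlimpse(s, images, size, offsets,
		ExtractGlimpseUniformNoise(false)) // fill out-of-image areas with Gaussian noise
}
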
37460// A container for an iterator resource.
37461//
37462// Returns A handle to the iterator that can be passed to a "MakeIterator"
37463// or "IteratorGetNext" op.
37464func Iterator(scope *Scope, shared_name string, container string, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
37465	if scope.Err() != nil {
37466		return
37467	}
37468	attrs := map[string]interface{}{"shared_name": shared_name, "container": container, "output_types": output_types, "output_shapes": output_shapes}
37469	opspec := tf.OpSpec{
37470		Type: "Iterator",
37471
37472		Attrs: attrs,
37473	}
37474	op := scope.AddOperation(opspec)
37475	return op.Output(0)
37476}
37477
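// Hand-written sketch (not part of the generated API): creating an iterator
// handle for a dataset of scalar float32 elements. The shared name is an
// illustrative assumption; the handle would then be wired to "MakeIterator"
// and "IteratorGetNext" ops as described above.
func exampleIterator(s *Scope) tf.Output {
	return Iterator(s, "example_iterator", "",
		[]tf.DataType{tf.Float},
		[]tf.Shape{tf.ScalarShape()})
}
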
37478// TensorForestTreeResourceHandleOpAttr is an optional argument to TensorForestTreeResourceHandleOp.
37479type TensorForestTreeResourceHandleOpAttr func(optionalAttr)
37480
37481// TensorForestTreeResourceHandleOpContainer sets the optional container attribute to value.
37482// If not specified, defaults to ""
37483func TensorForestTreeResourceHandleOpContainer(value string) TensorForestTreeResourceHandleOpAttr {
37484	return func(m optionalAttr) {
37485		m["container"] = value
37486	}
37487}
37488
37489// TensorForestTreeResourceHandleOpSharedName sets the optional shared_name attribute to value.
37490// If not specified, defaults to ""
37491func TensorForestTreeResourceHandleOpSharedName(value string) TensorForestTreeResourceHandleOpAttr {
37492	return func(m optionalAttr) {
37493		m["shared_name"] = value
37494	}
37495}
37496
37497// Creates a handle to a TensorForestTreeResource
37498func TensorForestTreeResourceHandleOp(scope *Scope, optional ...TensorForestTreeResourceHandleOpAttr) (resource tf.Output) {
37499	if scope.Err() != nil {
37500		return
37501	}
37502	attrs := map[string]interface{}{}
37503	for _, a := range optional {
37504		a(attrs)
37505	}
37506	opspec := tf.OpSpec{
37507		Type: "TensorForestTreeResourceHandleOp",
37508
37509		Attrs: attrs,
37510	}
37511	op := scope.AddOperation(opspec)
37512	return op.Output(0)
37513}
37514
37515// CropAndResizeGradImageAttr is an optional argument to CropAndResizeGradImage.
37516type CropAndResizeGradImageAttr func(optionalAttr)
37517
37518// CropAndResizeGradImageMethod sets the optional method attribute to value.
37519//
37520// value: A string specifying the interpolation method. Only 'bilinear' is
37521// supported for now.
37522// If not specified, defaults to "bilinear"
37523func CropAndResizeGradImageMethod(value string) CropAndResizeGradImageAttr {
37524	return func(m optionalAttr) {
37525		m["method"] = value
37526	}
37527}
37528
37529// Computes the gradient of the crop_and_resize op wrt the input image tensor.
37530//
37531// Arguments:
37532//	grads: A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`.
37533//	boxes: A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor
37534// specifies the coordinates of a box in the `box_ind[i]` image and is specified
37535// in normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of
37536// `y` is mapped to the image coordinate at `y * (image_height - 1)`, so the
37537// `[0, 1]` interval of normalized image height is mapped to
37538// `[0, image_height - 1]` in image height coordinates. We do allow y1 > y2, in
37539// which case the sampled crop is an up-down flipped version of the original
37540// image. The width dimension is treated similarly. Normalized coordinates
37541// outside the `[0, 1]` range are allowed, in which case we use
37542// `extrapolation_value` to extrapolate the input image values.
37543//	box_ind: A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`.
37544// The value of `box_ind[i]` specifies the image that the `i`-th box refers to.
37545//	image_size: A 1-D tensor with value `[batch, image_height, image_width, depth]`
37546// containing the original image size. Both `image_height` and `image_width` need
37547// to be positive.
37548//
37549//
37550// Returns A 4-D tensor of shape `[batch, image_height, image_width, depth]`.
37551func CropAndResizeGradImage(scope *Scope, grads tf.Output, boxes tf.Output, box_ind tf.Output, image_size tf.Output, T tf.DataType, optional ...CropAndResizeGradImageAttr) (output tf.Output) {
37552	if scope.Err() != nil {
37553		return
37554	}
37555	attrs := map[string]interface{}{"T": T}
37556	for _, a := range optional {
37557		a(attrs)
37558	}
37559	opspec := tf.OpSpec{
37560		Type: "CropAndResizeGradImage",
37561		Input: []tf.Input{
37562			grads, boxes, box_ind, image_size,
37563		},
37564		Attrs: attrs,
37565	}
37566	op := scope.AddOperation(opspec)
37567	return op.Output(0)
37568}
37569
37570// ShuffleDatasetAttr is an optional argument to ShuffleDataset.
37571type ShuffleDatasetAttr func(optionalAttr)
37572
37573// ShuffleDatasetReshuffleEachIteration sets the optional reshuffle_each_iteration attribute to value.
37574//
37575// value: If true, each iterator over this dataset will be given
37576// a different pseudorandomly generated seed, based on a sequence seeded by the
37577// `seed` and `seed2` inputs. If false, each iterator will be given the same
37578// seed, and repeated iteration over this dataset will yield the exact same
37579// sequence of results.
37580// If not specified, defaults to true
37581func ShuffleDatasetReshuffleEachIteration(value bool) ShuffleDatasetAttr {
37582	return func(m optionalAttr) {
37583		m["reshuffle_each_iteration"] = value
37584	}
37585}
37586
37587// Creates a dataset that shuffles elements from `input_dataset` pseudorandomly.
37588//
37589// Arguments:
37590//
37591//	buffer_size: The number of output elements to buffer in an iterator over
37592// this dataset. Compare with the `min_after_dequeue` attr when creating a
37593// `RandomShuffleQueue`.
37594//	seed: A scalar seed for the random number generator. If either `seed` or
37595// `seed2` is set to be non-zero, the random number generator is seeded
37596// by the given seed.  Otherwise, a random seed is used.
37597//	seed2: A second scalar seed to avoid seed collision.
37598//
37599//
37600func ShuffleDataset(scope *Scope, input_dataset tf.Output, buffer_size tf.Output, seed tf.Output, seed2 tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...ShuffleDatasetAttr) (handle tf.Output) {
37601	if scope.Err() != nil {
37602		return
37603	}
37604	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
37605	for _, a := range optional {
37606		a(attrs)
37607	}
37608	opspec := tf.OpSpec{
37609		Type: "ShuffleDataset",
37610		Input: []tf.Input{
37611			input_dataset, buffer_size, seed, seed2,
37612		},
37613		Attrs: attrs,
37614	}
37615	op := scope.AddOperation(opspec)
37616	return op.Output(0)
37617}
37618
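// Hand-written sketch (not part of the generated API): shuffling an existing
// dataset handle with a 1024-element buffer and fixed seeds. The buffer size,
// seeds, and element signature are illustrative assumptions; `inputDataset`
// is the handle produced by an upstream dataset op.
func exampleShuffleDataset(s *Scope, inputDataset tf.Output) tf.Output {
	bufferSize := Const(s.SubScope("buffer"), int64(1024))
	seed := Const(s.SubScope("seed"), int64(7))
	seed2 := Const(s.SubScope("seed2"), int64(11))
	return ShuffleDataset(s, inputDataset, bufferSize, seed, seed2,
		[]tf.DataType{tf.Float},
		[]tf.Shape{tf.ScalarShape()},
		ShuffleDatasetReshuffleEachIteration(false)) // identical order on every pass
}
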
37619// 3D fast Fourier transform.
37620//
37621// Computes the 3-dimensional discrete Fourier transform over the inner-most 3
37622// dimensions of `input`.
37623//
37624// Arguments:
37625//	input: A complex64 tensor.
37626//
37627// Returns A complex64 tensor of the same shape as `input`. The inner-most 3
37628//   dimensions of `input` are replaced with their 3D Fourier transform.
37629//
37630// @compatibility(numpy)
37631// Equivalent to np.fft.fftn with 3 dimensions.
37632// @end_compatibility
37633func FFT3D(scope *Scope, input tf.Output) (output tf.Output) {
37634	if scope.Err() != nil {
37635		return
37636	}
37637	opspec := tf.OpSpec{
37638		Type: "FFT3D",
37639		Input: []tf.Input{
37640			input,
37641		},
37642	}
37643	op := scope.AddOperation(opspec)
37644	return op.Output(0)
37645}
37646
37647// CropAndResizeGradBoxesAttr is an optional argument to CropAndResizeGradBoxes.
37648type CropAndResizeGradBoxesAttr func(optionalAttr)
37649
37650// CropAndResizeGradBoxesMethod sets the optional method attribute to value.
37651//
37652// value: A string specifying the interpolation method. Only 'bilinear' is
37653// supported for now.
37654// If not specified, defaults to "bilinear"
37655func CropAndResizeGradBoxesMethod(value string) CropAndResizeGradBoxesAttr {
37656	return func(m optionalAttr) {
37657		m["method"] = value
37658	}
37659}
37660
37661// Computes the gradient of the crop_and_resize op wrt the input boxes tensor.
37662//
37663// Arguments:
37664//	grads: A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`.
37665//	image: A 4-D tensor of shape `[batch, image_height, image_width, depth]`.
37666// Both `image_height` and `image_width` need to be positive.
37667//	boxes: A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor
37668// specifies the coordinates of a box in the `box_ind[i]` image and is specified
37669// in normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of
37670// `y` is mapped to the image coordinate at `y * (image_height - 1)`, so the
37671// `[0, 1]` interval of normalized image height is mapped to
37672// `[0, image_height - 1]` in image height coordinates. We do allow y1 > y2, in
37673// which case the sampled crop is an up-down flipped version of the original
37674// image. The width dimension is treated similarly. Normalized coordinates
37675// outside the `[0, 1]` range are allowed, in which case we use
37676// `extrapolation_value` to extrapolate the input image values.
37677//	box_ind: A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`.
37678// The value of `box_ind[i]` specifies the image that the `i`-th box refers to.
37679//
37680// Returns A 2-D tensor of shape `[num_boxes, 4]`.
37681func CropAndResizeGradBoxes(scope *Scope, grads tf.Output, image tf.Output, boxes tf.Output, box_ind tf.Output, optional ...CropAndResizeGradBoxesAttr) (output tf.Output) {
37682	if scope.Err() != nil {
37683		return
37684	}
37685	attrs := map[string]interface{}{}
37686	for _, a := range optional {
37687		a(attrs)
37688	}
37689	opspec := tf.OpSpec{
37690		Type: "CropAndResizeGradBoxes",
37691		Input: []tf.Input{
37692			grads, image, boxes, box_ind,
37693		},
37694		Attrs: attrs,
37695	}
37696	op := scope.AddOperation(opspec)
37697	return op.Output(0)
37698}
37699
37700// Greedily selects a subset of bounding boxes in descending order of score,
37701//
37702// pruning away boxes that have high intersection-over-union (IOU) overlap
37703// with previously selected boxes.  Bounding boxes with score less than
37704// `score_threshold` are removed.  Bounding boxes are supplied as
37705// [y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any
37706// diagonal pair of box corners and the coordinates can be provided as normalized
37707// (i.e., lying in the interval [0, 1]) or absolute.  Note that this algorithm
37708// is agnostic to where the origin is in the coordinate system and more
37709// generally is invariant to orthogonal transformations and translations
37710// of the coordinate system; thus translating or reflecting the coordinate
37711// system results in the same boxes being selected by the algorithm.
37712// The output of this operation is a set of integers indexing into the input
37713// collection of bounding boxes representing the selected boxes.  The bounding
37714// box coordinates corresponding to the selected indices can then be obtained
37715// using the `tf.gather` operation.  For example:
37716//   selected_indices = tf.image.non_max_suppression_v2(
37717//       boxes, scores, max_output_size, iou_threshold, score_threshold)
37718//   selected_boxes = tf.gather(boxes, selected_indices)
37719//
37720// Arguments:
37721//	boxes: A 2-D float tensor of shape `[num_boxes, 4]`.
37722//	scores: A 1-D float tensor of shape `[num_boxes]` representing a single
37723// score corresponding to each box (each row of boxes).
37724//	max_output_size: A scalar integer tensor representing the maximum number of
37725// boxes to be selected by non max suppression.
37726//	iou_threshold: A 0-D float tensor representing the threshold for deciding whether
37727// boxes overlap too much with respect to IOU.
37728//	score_threshold: A 0-D float tensor representing the threshold for deciding when to remove
37729// boxes based on score.
37730//
37731// Returns A 1-D integer tensor of shape `[M]` representing the selected
37732// indices from the boxes tensor, where `M <= max_output_size`.
37733func NonMaxSuppressionV3(scope *Scope, boxes tf.Output, scores tf.Output, max_output_size tf.Output, iou_threshold tf.Output, score_threshold tf.Output) (selected_indices tf.Output) {
37734	if scope.Err() != nil {
37735		return
37736	}
37737	opspec := tf.OpSpec{
37738		Type: "NonMaxSuppressionV3",
37739		Input: []tf.Input{
37740			boxes, scores, max_output_size, iou_threshold, score_threshold,
37741		},
37742	}
37743	op := scope.AddOperation(opspec)
37744	return op.Output(0)
37745}
37746
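// Hand-written sketch (not part of the generated API): a Go analogue of the
// selection example above, gathering the surviving boxes with the generated
// Gather wrapper. The box data and thresholds are illustrative assumptions.
func exampleNonMaxSuppressionV3(s *Scope) tf.Output {
	boxes := Const(s.SubScope("boxes"), [][]float32{
		{0.0, 0.0, 1.0, 1.0},
		{0.0, 0.1, 1.0, 1.1}, // overlaps the first box heavily
	})
	scores := Const(s.SubScope("scores"), []float32{0.9, 0.8})
	maxOutputSize := Const(s.SubScope("max"), int32(10))
	iouThreshold := Const(s.SubScope("iou"), float32(0.5))
	scoreThreshold := Const(s.SubScope("score"), float32(0.0))
	selected := NonMaxSuppressionV3(s, boxes, scores, maxOutputSize, iouThreshold, scoreThreshold)
	return Gather(s.SubScope("gather"), boxes, selected)
}
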
37747// NonMaxSuppressionV4Attr is an optional argument to NonMaxSuppressionV4.
37748type NonMaxSuppressionV4Attr func(optionalAttr)
37749
37750// NonMaxSuppressionV4PadToMaxOutputSize sets the optional pad_to_max_output_size attribute to value.
37751//
37752// value: If true, the output `selected_indices` is padded to be of length
37753// `max_output_size`. Defaults to false.
37754// If not specified, defaults to false
37755func NonMaxSuppressionV4PadToMaxOutputSize(value bool) NonMaxSuppressionV4Attr {
37756	return func(m optionalAttr) {
37757		m["pad_to_max_output_size"] = value
37758	}
37759}
37760
37761// Greedily selects a subset of bounding boxes in descending order of score,
37762//
37763// pruning away boxes that have high intersection-over-union (IOU) overlap
37764// with previously selected boxes.  Bounding boxes with score less than
37765// `score_threshold` are removed.  Bounding boxes are supplied as
37766// [y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any
37767// diagonal pair of box corners and the coordinates can be provided as normalized
37768// (i.e., lying in the interval [0, 1]) or absolute.  Note that this algorithm
37769// is agnostic to where the origin is in the coordinate system and more
37770// generally is invariant to orthogonal transformations and translations
37771// of the coordinate system; thus translating or reflecting the coordinate
37772// system results in the same boxes being selected by the algorithm.
37773// The output of this operation is a set of integers indexing into the input
37774// collection of bounding boxes representing the selected boxes.  The bounding
37775// box coordinates corresponding to the selected indices can then be obtained
37776// using the `tf.gather` operation.  For example:
37777//   selected_indices = tf.image.non_max_suppression_v2(
37778//       boxes, scores, max_output_size, iou_threshold, score_threshold)
37779//   selected_boxes = tf.gather(boxes, selected_indices)
37780//
37781// Arguments:
37782//	boxes: A 2-D float tensor of shape `[num_boxes, 4]`.
37783//	scores: A 1-D float tensor of shape `[num_boxes]` representing a single
37784// score corresponding to each box (each row of boxes).
37785//	max_output_size: A scalar integer tensor representing the maximum number of
37786// boxes to be selected by non max suppression.
37787//	iou_threshold: A 0-D float tensor representing the threshold for deciding whether
37788// boxes overlap too much with respect to IOU.
37789//	score_threshold: A 0-D float tensor representing the threshold for deciding when to remove
37790// boxes based on score.
37791//
37792// Returns A 1-D integer tensor of shape `[M]` representing the selected
37793// indices from the boxes tensor, where `M <= max_output_size`; and a 0-D integer tensor representing the number of valid elements in
37794// `selected_indices`, with the valid elements appearing first.
37795func NonMaxSuppressionV4(scope *Scope, boxes tf.Output, scores tf.Output, max_output_size tf.Output, iou_threshold tf.Output, score_threshold tf.Output, optional ...NonMaxSuppressionV4Attr) (selected_indices tf.Output, valid_outputs tf.Output) {
37796	if scope.Err() != nil {
37797		return
37798	}
37799	attrs := map[string]interface{}{}
37800	for _, a := range optional {
37801		a(attrs)
37802	}
37803	opspec := tf.OpSpec{
37804		Type: "NonMaxSuppressionV4",
37805		Input: []tf.Input{
37806			boxes, scores, max_output_size, iou_threshold, score_threshold,
37807		},
37808		Attrs: attrs,
37809	}
37810	op := scope.AddOperation(opspec)
37811	return op.Output(0), op.Output(1)
37812}
37813
37814// Removes keys and their associated values from a table.
37815//
37816// The tensor `keys` must be of the same type as the keys of the table. Keys not
37817// already in the table are silently ignored.
37818//
37819// Arguments:
37820//	table_handle: Handle to the table.
37821//	keys: Any shape.  Keys of the elements to remove.
37822//
37823// Returns the created operation.
37824func LookupTableRemoveV2(scope *Scope, table_handle tf.Output, keys tf.Output) (o *tf.Operation) {
37825	if scope.Err() != nil {
37826		return
37827	}
37828	opspec := tf.OpSpec{
37829		Type: "LookupTableRemoveV2",
37830		Input: []tf.Input{
37831			table_handle, keys,
37832		},
37833	}
37834	return scope.AddOperation(opspec)
37835}
37836
37837// CombinedNonMaxSuppressionAttr is an optional argument to CombinedNonMaxSuppression.
37838type CombinedNonMaxSuppressionAttr func(optionalAttr)
37839
37840// CombinedNonMaxSuppressionPadPerClass sets the optional pad_per_class attribute to value.
37841//
37842// value: If false, the output nmsed boxes, scores and classes
37843// are padded/clipped to `max_total_size`. If true, the
37844// output nmsed boxes, scores and classes are padded to be of length
37845// `max_size_per_class`*`num_classes`, unless it exceeds `max_total_size` in
37846// which case it is clipped to `max_total_size`. Defaults to false.
37847// If not specified, defaults to false
37848func CombinedNonMaxSuppressionPadPerClass(value bool) CombinedNonMaxSuppressionAttr {
37849	return func(m optionalAttr) {
37850		m["pad_per_class"] = value
37851	}
37852}
37853
37854// Greedily selects a subset of bounding boxes in descending order of score,
37855//
37856// This operation performs non_max_suppression on the inputs per batch, across
37857// all classes.
37858// Prunes away boxes that have high intersection-over-union (IOU) overlap
37859// with previously selected boxes.  Bounding boxes are supplied as
37860// [y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any
37861// diagonal pair of box corners and the coordinates can be provided as normalized
37862// (i.e., lying in the interval [0, 1]) or absolute.  Note that this algorithm
37863// is agnostic to where the origin is in the coordinate system. Also note that
37864// this algorithm is invariant to orthogonal transformations and translations
37865// of the coordinate system; thus translating or reflections of the coordinate
37866// system result in the same boxes being selected by the algorithm.
37867// The output of this operation is the final boxes, scores and classes tensor
37868// returned after performing non_max_suppression.
37869//
37870// Arguments:
37871//	boxes: A 4-D float tensor of shape `[batch_size, num_boxes, q, 4]`. If `q` is 1 then
37872// same boxes are used for all classes otherwise, if `q` is equal to number of
37873// classes, class-specific boxes are used.
37874//	scores: A 3-D float tensor of shape `[batch_size, num_boxes, num_classes]`
37875// representing a single score corresponding to each box (each row of boxes).
37876//	max_output_size_per_class: A scalar integer tensor representing the maximum number of
37877// boxes to be selected by non max suppression per class.
37878//	max_total_size: A scalar representing maximum number of boxes retained over all classes.
37879//	iou_threshold: A 0-D float tensor representing the threshold for deciding whether
37880// boxes overlap too much with respect to IOU.
37881//	score_threshold: A 0-D float tensor representing the threshold for deciding when to remove
37882// boxes based on score.
37883//
37884// Returns A [batch_size, max_detections, 4] float32 tensor
37885// containing the non-max suppressed boxes; a [batch_size, max_detections] float32 tensor
37886// containing the scores for the boxes; a [batch_size, max_detections] float32 tensor
37887// containing the classes for the boxes; and a [batch_size] int32 tensor indicating the number of
37888// valid detections per batch item. Only the top num_detections[i] entries in
37889// nms_boxes[i], nms_scores[i] and nms_class[i] are valid. The rest of the
37890// entries are zero paddings.
37891func CombinedNonMaxSuppression(scope *Scope, boxes tf.Output, scores tf.Output, max_output_size_per_class tf.Output, max_total_size tf.Output, iou_threshold tf.Output, score_threshold tf.Output, optional ...CombinedNonMaxSuppressionAttr) (nmsed_boxes tf.Output, nmsed_scores tf.Output, nmsed_classes tf.Output, valid_detections tf.Output) {
37892	if scope.Err() != nil {
37893		return
37894	}
37895	attrs := map[string]interface{}{}
37896	for _, a := range optional {
37897		a(attrs)
37898	}
37899	opspec := tf.OpSpec{
37900		Type: "CombinedNonMaxSuppression",
37901		Input: []tf.Input{
37902			boxes, scores, max_output_size_per_class, max_total_size, iou_threshold, score_threshold,
37903		},
37904		Attrs: attrs,
37905	}
37906	op := scope.AddOperation(opspec)
37907	return op.Output(0), op.Output(1), op.Output(2), op.Output(3)
37908}
37909
37910// Computes the matrix logarithm of one or more square matrices:
37911//
37912//
37913// \\(log(exp(A)) = A\\)
37914//
37915// This op is only defined for complex matrices. If A is positive-definite and
37916// real, then casting to a complex matrix, taking the logarithm and casting back
37917// to a real matrix will give the correct result.
37918//
37919// This function computes the matrix logarithm using the Schur-Parlett algorithm.
37920// Details of the algorithm can be found in Section 11.6.2 of:
37921// Nicholas J. Higham, Functions of Matrices: Theory and Computation, SIAM 2008.
37922// ISBN 978-0-898716-46-7.
37923//
37924// The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
37925// form square matrices. The output is a tensor of the same shape as the input
37926// containing the matrix logarithm for all input submatrices `[..., :, :]`.
37927//
37928// Arguments:
37929//	input: Shape is `[..., M, M]`.
37930//
37931// Returns Shape is `[..., M, M]`.
37932//
37933// @compatibility(scipy)
37934// Equivalent to scipy.linalg.logm
37935// @end_compatibility
37936func MatrixLogarithm(scope *Scope, input tf.Output) (output tf.Output) {
37937	if scope.Err() != nil {
37938		return
37939	}
37940	opspec := tf.OpSpec{
37941		Type: "MatrixLogarithm",
37942		Input: []tf.Input{
37943			input,
37944		},
37945	}
37946	op := scope.AddOperation(opspec)
37947	return op.Output(0)
37948}
37949
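// Hand-written sketch (not part of the generated API): the cast-through-complex
// recipe the comment above describes, applied to a positive-definite real
// matrix. The matrix values are illustrative assumptions.
func exampleMatrixLogarithm(s *Scope) tf.Output {
	a := Const(s.SubScope("a"), [][]float32{{2, 0}, {0, 3}})
	zeros := Const(s.SubScope("zeros"), [][]float32{{0, 0}, {0, 0}})
	// Cast to complex64, take the logarithm, and cast back to a real matrix.
	ac := Complex(s.SubScope("complex"), a, zeros)
	logm := MatrixLogarithm(s.SubScope("logm"), ac)
	return Real(s.SubScope("real"), logm)
}
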
37950//   This op is used as a placeholder in If branch functions. It doesn't provide a
37951//   valid output when run, so must either be removed (e.g. replaced with a
37952//   function input) or guaranteed not to be used (e.g. if mirroring an
37953//   intermediate output needed for the gradient computation of the other branch).
37954//
37955// Arguments:
37956//	dtype: The type of the output.
37957//	shape:     The purported shape of the output. This is only used for shape inference;
37958//     the output will not necessarily have this shape. Can be a partial shape.
37959//
37960// Returns "Fake" output value. This should not be consumed by another op.
37961func FakeParam(scope *Scope, dtype tf.DataType, shape tf.Shape) (output tf.Output) {
37962	if scope.Err() != nil {
37963		return
37964	}
37965	attrs := map[string]interface{}{"dtype": dtype, "shape": shape}
37966	opspec := tf.OpSpec{
37967		Type: "FakeParam",
37968
37969		Attrs: attrs,
37970	}
37971	op := scope.AddOperation(opspec)
37972	return op.Output(0)
37973}
37974
37975// Returns the next representable value of `x1` in the direction of `x2`, element-wise.
37976//
37977// This operation returns the same result as the C++ std::nextafter function.
37978//
37979// It can also return a subnormal number.
37980//
37981// @compatibility(cpp)
37982// Equivalent to C++ std::nextafter function.
37983// @end_compatibility
37984func NextAfter(scope *Scope, x1 tf.Output, x2 tf.Output) (output tf.Output) {
37985	if scope.Err() != nil {
37986		return
37987	}
37988	opspec := tf.OpSpec{
37989		Type: "NextAfter",
37990		Input: []tf.Input{
37991			x1, x2,
37992		},
37993	}
37994	op := scope.AddOperation(opspec)
37995	return op.Output(0)
37996}
37997
37998// OrderedMapStageAttr is an optional argument to OrderedMapStage.
37999type OrderedMapStageAttr func(optionalAttr)
38000
38001// OrderedMapStageCapacity sets the optional capacity attribute to value.
38002//
38003// value: Maximum number of elements in the Staging Area. If > 0, inserts
38004// on the container will block when the capacity is reached.
38005// If not specified, defaults to 0
38006//
38007// REQUIRES: value >= 0
38008func OrderedMapStageCapacity(value int64) OrderedMapStageAttr {
38009	return func(m optionalAttr) {
38010		m["capacity"] = value
38011	}
38012}
38013
38014// OrderedMapStageMemoryLimit sets the optional memory_limit attribute to value.
38015// If not specified, defaults to 0
38016//
38017// REQUIRES: value >= 0
38018func OrderedMapStageMemoryLimit(value int64) OrderedMapStageAttr {
38019	return func(m optionalAttr) {
38020		m["memory_limit"] = value
38021	}
38022}
38023
38024// OrderedMapStageContainer sets the optional container attribute to value.
38025//
38026// value: If non-empty, this queue is placed in the given container. Otherwise,
38027// a default container is used.
38028// If not specified, defaults to ""
38029func OrderedMapStageContainer(value string) OrderedMapStageAttr {
38030	return func(m optionalAttr) {
38031		m["container"] = value
38032	}
38033}
38034
38035// OrderedMapStageSharedName sets the optional shared_name attribute to value.
38036//
38037// value: It is necessary to match this name to the matching Unstage Op.
38038// If not specified, defaults to ""
38039func OrderedMapStageSharedName(value string) OrderedMapStageAttr {
38040	return func(m optionalAttr) {
38041		m["shared_name"] = value
38042	}
38043}
38044
38045// Stage (key, values) in the underlying container, which behaves like an ordered
38046//
38047// associative container.   Elements are ordered by key.
38048//
38049// Arguments:
38050//	key: int64
38051//
38052//	values: a list of tensors
38053// dtypes: A list of data types that inserted values should adhere to.
38054//
38055//
38056// Returns the created operation.
38057func OrderedMapStage(scope *Scope, key tf.Output, indices tf.Output, values []tf.Output, dtypes []tf.DataType, optional ...OrderedMapStageAttr) (o *tf.Operation) {
38058	if scope.Err() != nil {
38059		return
38060	}
38061	attrs := map[string]interface{}{"dtypes": dtypes}
38062	for _, a := range optional {
38063		a(attrs)
38064	}
38065	opspec := tf.OpSpec{
38066		Type: "OrderedMapStage",
38067		Input: []tf.Input{
38068			key, indices, tf.OutputList(values),
38069		},
38070		Attrs: attrs,
38071	}
38072	return scope.AddOperation(opspec)
38073}
38074
38075// StackPushV2Attr is an optional argument to StackPushV2.
38076type StackPushV2Attr func(optionalAttr)
38077
38078// StackPushV2SwapMemory sets the optional swap_memory attribute to value.
38079//
38080// value: Swap `elem` to CPU. Defaults to false.
38081// If not specified, defaults to false
38082func StackPushV2SwapMemory(value bool) StackPushV2Attr {
38083	return func(m optionalAttr) {
38084		m["swap_memory"] = value
38085	}
38086}
38087
38088// Push an element onto the stack.
38089//
38090// Arguments:
38091//	handle: The handle to a stack.
38092//	elem: The tensor to be pushed onto the stack.
38093//
38094// Returns The same tensor as the input 'elem'.
38095func StackPushV2(scope *Scope, handle tf.Output, elem tf.Output, optional ...StackPushV2Attr) (output tf.Output) {
38096	if scope.Err() != nil {
38097		return
38098	}
38099	attrs := map[string]interface{}{}
38100	for _, a := range optional {
38101		a(attrs)
38102	}
38103	opspec := tf.OpSpec{
38104		Type: "StackPushV2",
38105		Input: []tf.Input{
38106			handle, elem,
38107		},
38108		Attrs: attrs,
38109	}
38110	op := scope.AddOperation(opspec)
38111	return op.Output(0)
38112}
38113
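// Hand-written sketch (not part of the generated API): pushing onto and
// popping from a stack resource. StackV2 and StackPopV2 are the companion
// generated wrappers; the stack size and element value are illustrative
// assumptions.
func exampleStackPushPop(s *Scope) tf.Output {
	maxSize := Const(s.SubScope("max"), int32(10))
	handle := StackV2(s.SubScope("stack"), maxSize, tf.Float)
	elem := Const(s.SubScope("elem"), float32(1.5))
	pushed := StackPushV2(s.SubScope("push"), handle, elem,
		StackPushV2SwapMemory(true)) // allow spilling the element to host memory
	// Ensure the pop runs after the push via a control dependency.
	popScope := s.SubScope("pop").WithControlDependencies(pushed.Op)
	return StackPopV2(popScope, handle, tf.Float)
}
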
38114// RpcAttr is an optional argument to Rpc.
38115type RpcAttr func(optionalAttr)
38116
38117// RpcProtocol sets the optional protocol attribute to value.
38118//
38119// value: RPC protocol to use.  Empty string means use the default protocol.
38120// Options include 'grpc'.
38121// If not specified, defaults to ""
38122func RpcProtocol(value string) RpcAttr {
38123	return func(m optionalAttr) {
38124		m["protocol"] = value
38125	}
38126}
38127
38128// RpcFailFast sets the optional fail_fast attribute to value.
38129//
38130// value: `boolean`. If `true` (default), then failures to connect
38131// (i.e., the server does not immediately respond) cause an RPC failure.
38132// If not specified, defaults to true
38133func RpcFailFast(value bool) RpcAttr {
38134	return func(m optionalAttr) {
38135		m["fail_fast"] = value
38136	}
38137}
38138
38139// RpcTimeoutInMs sets the optional timeout_in_ms attribute to value.
38140//
38141// value: `int`. If `0` (default), then the kernel will run the RPC
38142// request and only time out if the RPC deadline passes or the session times out.
38143// If this value is greater than `0`, then the op will raise an exception if
38144// the RPC takes longer than `timeout_in_ms`.
38145// If not specified, defaults to 0
38146func RpcTimeoutInMs(value int64) RpcAttr {
38147	return func(m optionalAttr) {
38148		m["timeout_in_ms"] = value
38149	}
38150}
38151
38152// Perform batches of RPC requests.
38153//
38154// This op asynchronously performs either a single RPC request, or a batch
38155// of requests.  RPC requests are defined by three main parameters:
38156//
38157//   - `address` (the host+port or BNS address of the request)
38158//   - `method` (the RPC method name for the request)
38159//   - `request` (the serialized proto string, or vector of strings,
38160//      of the RPC request argument).
38161//
38162// For example, if you have an RPC service running at localhost:2345,
38163// and its interface is configured with the following proto declaration:
38164//
38165// ```
38166// service MyService {
38167//   rpc MyMethod(MyRequestProto) returns (MyResponseProto) {
38168//   }
38169// };
38170// ```
38171//
38172// then call this op with arguments:
38173//
38174// ```
38175// address = "localhost:2345"
38176// method = "MyService/MyMethod"
38177// ```
38178//
38179// The `request` tensor is a string tensor representing serialized `MyRequestProto`
38180// strings; and the output string tensor `response` will have the same shape
38181// and contain (upon successful completion) corresponding serialized
38182// `MyResponseProto` strings.
38183//
38184// For example, to send a single, empty, `MyRequestProto`, call
38185// this op with `request = ""`.  To send 5 **parallel** empty requests,
38186// call this op with `request = ["", "", "", "", ""]`.
38187//
38188// More generally, one can create a batch of `MyRequestProto` serialized protos
38189// from regular batched tensors using the `encode_proto` op, and convert
38190// the response `MyResponseProto` serialized protos to batched tensors
38191// using the `decode_proto` op.
38192//
38193// **NOTE** Working with serialized proto strings is faster than instantiating
38194// actual proto objects in memory, so no performance degradation is expected
38195// compared to writing custom kernels for this workflow.
38196//
38197// If the connection fails or the remote worker returns an error
38198// status, the op reraises this exception locally.
38199//
38200// See the `TryRpc` op if you prefer to handle RPC failures manually in the graph.
38201//
38202// Arguments:
38203//	address: `0-D` or `1-D`.  The address (i.e. host_name:port) of the RPC server.
38204// If this tensor has more than 1 element, then multiple parallel rpc requests
38205// are sent.  This argument broadcasts with `method` and `request`.
38206//	method: `0-D` or `1-D`.  The method address on the RPC server.
38207// If this tensor has more than 1 element, then multiple parallel rpc requests
38208// are sent.  This argument broadcasts with `address` and `request`.
38209//	request: `0-D` or `1-D`.  Serialized proto strings: the rpc request argument.
38210// If this tensor has more than 1 element, then multiple parallel rpc requests
38211// are sent.  This argument broadcasts with `address` and `method`.
38212//
38213// Returns Same shape as `request`. Serialized proto strings: the rpc responses.
38214func Rpc(scope *Scope, address tf.Output, method tf.Output, request tf.Output, optional ...RpcAttr) (response tf.Output) {
38215	if scope.Err() != nil {
38216		return
38217	}
38218	attrs := map[string]interface{}{}
38219	for _, a := range optional {
38220		a(attrs)
38221	}
38222	opspec := tf.OpSpec{
38223		Type: "Rpc",
38224		Input: []tf.Input{
38225			address, method, request,
38226		},
38227		Attrs: attrs,
38228	}
38229	op := scope.AddOperation(opspec)
38230	return op.Output(0)
38231}
38232
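// Hand-written sketch (not part of the generated API): issuing three parallel
// RPCs to a hypothetical gRPC service. The address, method, and payloads are
// illustrative assumptions, and the op fails at run time unless such a server
// is actually listening.
func exampleRpc(s *Scope) tf.Output {
	address := Const(s.SubScope("address"), "localhost:2345")
	method := Const(s.SubScope("method"), "/MyService/MyMethod")  // hypothetical method name
	request := Const(s.SubScope("request"), []string{"", "", ""}) // three empty serialized protos
	return Rpc(s, address, method, request,
		RpcProtocol("grpc"),
		RpcFailFast(false),
		RpcTimeoutInMs(5000))
}
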
38233// Records the bytes size of each element of `input_dataset` in a StatsAggregator.
38234func ExperimentalBytesProducedStatsDataset(scope *Scope, input_dataset tf.Output, tag tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
38235	if scope.Err() != nil {
38236		return
38237	}
38238	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
38239	opspec := tf.OpSpec{
38240		Type: "ExperimentalBytesProducedStatsDataset",
38241		Input: []tf.Input{
38242			input_dataset, tag,
38243		},
38244		Attrs: attrs,
38245	}
38246	op := scope.AddOperation(opspec)
38247	return op.Output(0)
38248}
38249
38250// A substitute for `InterleaveDataset` on a fixed list of `N` datasets.
38251//
38252// Arguments:
38253//	selector_input_dataset: A dataset of scalar `DT_INT64` elements that determines which of the
38254// `N` data inputs should produce the next output element.
38255//	data_input_datasets: `N` datasets with the same type that will be interleaved according to
38256// the values of `selector_input_dataset`.
38257//
38258//
38259func ExperimentalDirectedInterleaveDataset(scope *Scope, selector_input_dataset tf.Output, data_input_datasets []tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
38260	if scope.Err() != nil {
38261		return
38262	}
38263	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
38264	opspec := tf.OpSpec{
38265		Type: "ExperimentalDirectedInterleaveDataset",
38266		Input: []tf.Input{
38267			selector_input_dataset, tf.OutputList(data_input_datasets),
38268		},
38269		Attrs: attrs,
38270	}
38271	op := scope.AddOperation(opspec)
38272	return op.Output(0)
38273}
38274
38275// RandomUniformIntAttr is an optional argument to RandomUniformInt.
38276type RandomUniformIntAttr func(optionalAttr)
38277
38278// RandomUniformIntSeed sets the optional seed attribute to value.
38279//
38280// value: If either `seed` or `seed2` are set to be non-zero, the random number
38281// generator is seeded by the given seed.  Otherwise, it is seeded by a
38282// random seed.
38283// If not specified, defaults to 0
38284func RandomUniformIntSeed(value int64) RandomUniformIntAttr {
38285	return func(m optionalAttr) {
38286		m["seed"] = value
38287	}
38288}
38289
38290// RandomUniformIntSeed2 sets the optional seed2 attribute to value.
38291//
38292// value: A second seed to avoid seed collision.
38293// If not specified, defaults to 0
38294func RandomUniformIntSeed2(value int64) RandomUniformIntAttr {
38295	return func(m optionalAttr) {
38296		m["seed2"] = value
38297	}
38298}
38299
38300// Outputs random integers from a uniform distribution.
38301//
38302// The generated values are uniform integers in the range `[minval, maxval)`.
38303// The lower bound `minval` is included in the range, while the upper bound
38304// `maxval` is excluded.
38305//
38306// The random integers are slightly biased unless `maxval - minval` is an exact
38307// power of two.  The bias is small for values of `maxval - minval` significantly
38308// smaller than the range of the output (either `2^32` or `2^64`).
38309//
38310// Arguments:
38311//	shape: The shape of the output tensor.
38312//	minval: 0-D.  Inclusive lower bound on the generated integers.
38313//	maxval: 0-D.  Exclusive upper bound on the generated integers.
38314//
38315// Returns A tensor of the specified shape filled with uniform random integers.
38316func RandomUniformInt(scope *Scope, shape tf.Output, minval tf.Output, maxval tf.Output, optional ...RandomUniformIntAttr) (output tf.Output) {
38317	if scope.Err() != nil {
38318		return
38319	}
38320	attrs := map[string]interface{}{}
38321	for _, a := range optional {
38322		a(attrs)
38323	}
38324	opspec := tf.OpSpec{
38325		Type: "RandomUniformInt",
38326		Input: []tf.Input{
38327			shape, minval, maxval,
38328		},
38329		Attrs: attrs,
38330	}
38331	op := scope.AddOperation(opspec)
38332	return op.Output(0)
38333}
38334
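// Hand-written sketch (not part of the generated API): drawing four int64
// values uniformly from [0, 10) with a fixed seed. The shape, bounds, and
// seed are illustrative assumptions.
func exampleRandomUniformInt(s *Scope) tf.Output {
	shape := Const(s.SubScope("shape"), []int32{4})
	minval := Const(s.SubScope("min"), int64(0))  // inclusive lower bound
	maxval := Const(s.SubScope("max"), int64(10)) // exclusive upper bound
	return RandomUniformInt(s, shape, minval, maxval,
		RandomUniformIntSeed(42))
}
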
38335// Add the quantile summaries to each quantile stream resource.
38336//
38337// An op that adds a list of quantile summaries to a quantile stream resource. Each
38338// summary Tensor is rank 2, containing summaries (value, weight, min_rank, max_rank)
38339// for a single feature.
38340//
38341// Arguments:
38342//	quantile_stream_resource_handle: resource handle referring to a QuantileStreamResource.
38343//	summaries: string; list of rank 2 Tensors, each containing the summaries for a single feature.
38344//
38345// Returns the created operation.
38346func BoostedTreesQuantileStreamResourceAddSummaries(scope *Scope, quantile_stream_resource_handle tf.Output, summaries []tf.Output) (o *tf.Operation) {
38347	if scope.Err() != nil {
38348		return
38349	}
38350	opspec := tf.OpSpec{
38351		Type: "BoostedTreesQuantileStreamResourceAddSummaries",
38352		Input: []tf.Input{
38353			quantile_stream_resource_handle, tf.OutputList(summaries),
38354		},
38355	}
38356	return scope.AddOperation(opspec)
38357}
38358
38359// Creates a Dataset that returns pseudorandom numbers.
38360//
38361// Arguments:
38362//	seed: A scalar seed for the random number generator. If either seed or
38363// seed2 is set to be non-zero, the random number generator is seeded
38364// by the given seed.  Otherwise, a random seed is used.
38365//	seed2: A second scalar seed to avoid seed collision.
38366//
38367//
38368func ExperimentalRandomDataset(scope *Scope, seed tf.Output, seed2 tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
38369	if scope.Err() != nil {
38370		return
38371	}
38372	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
38373	opspec := tf.OpSpec{
38374		Type: "ExperimentalRandomDataset",
38375		Input: []tf.Input{
38376			seed, seed2,
38377		},
38378		Attrs: attrs,
38379	}
38380	op := scope.AddOperation(opspec)
38381	return op.Output(0)
38382}
38383
38384// Creates a dataset that overrides the maximum intra-op parallelism.
38385//
38386// Arguments:
38387//
38388//	max_intra_op_parallelism: Identifies the maximum intra-op parallelism to use.
38389//
38390//
38391func ExperimentalMaxIntraOpParallelismDataset(scope *Scope, input_dataset tf.Output, max_intra_op_parallelism tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
38392	if scope.Err() != nil {
38393		return
38394	}
38395	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
38396	opspec := tf.OpSpec{
38397		Type: "ExperimentalMaxIntraOpParallelismDataset",
38398		Input: []tf.Input{
38399			input_dataset, max_intra_op_parallelism,
38400		},
38401		Attrs: attrs,
38402	}
38403	op := scope.AddOperation(opspec)
38404	return op.Output(0)
38405}
38406
38407// StringSplitV2Attr is an optional argument to StringSplitV2.
38408type StringSplitV2Attr func(optionalAttr)
38409
38410// StringSplitV2Maxsplit sets the optional maxsplit attribute to value.
38411//
38412// value: An `int`. If `maxsplit > 0`, limits the number of splits in the result.
38413// If not specified, defaults to -1
38414func StringSplitV2Maxsplit(value int64) StringSplitV2Attr {
38415	return func(m optionalAttr) {
38416		m["maxsplit"] = value
38417	}
38418}
38419
38420// Split elements of `source` based on `sep` into a `SparseTensor`.
38421//
38422// Let N be the size of source (typically N will be the batch size). Split each
38423// element of `source` based on `sep` and return a `SparseTensor`
38424// containing the split tokens. Empty tokens are ignored.
38425//
38426// For example, if N = 2, source[0] is 'hello world' and source[1] is 'a b c',
38427// then the output will be
38428// ```
38429// st.indices = [0, 0;
38430//               0, 1;
38431//               1, 0;
38432//               1, 1;
38433//               1, 2]
38434// st.shape = [2, 3]
38435// st.values = ['hello', 'world', 'a', 'b', 'c']
38436// ```
38437//
38438// If `sep` is given, consecutive delimiters are not grouped together and are
38439// deemed to delimit empty strings. For example, source of `"1<>2<><>3"` and
38440// sep of `"<>"` returns `["1", "2", "", "3"]`. If `sep` is None or an empty
38441// string, consecutive whitespace are regarded as a single separator, and the
38442// result will contain no empty strings at the startor end if the string has
38443// leading or trailing whitespace.
38444//
38445// Note that the above mentioned behavior matches python's str.split.
38446//
38447// Arguments:
38448//	input: `1-D` string `Tensor`, the strings to split.
38449//	sep: `0-D` string `Tensor`, the delimiter character.
38450func StringSplitV2(scope *Scope, input tf.Output, sep tf.Output, optional ...StringSplitV2Attr) (indices tf.Output, values tf.Output, shape tf.Output) {
38451	if scope.Err() != nil {
38452		return
38453	}
38454	attrs := map[string]interface{}{}
38455	for _, a := range optional {
38456		a(attrs)
38457	}
38458	opspec := tf.OpSpec{
38459		Type: "StringSplitV2",
38460		Input: []tf.Input{
38461			input, sep,
38462		},
38463		Attrs: attrs,
38464	}
38465	op := scope.AddOperation(opspec)
38466	return op.Output(0), op.Output(1), op.Output(2)
38467}
38468
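// Hand-written sketch (not part of the generated API): splitting two strings
// on whitespace, mirroring the example above. The inputs are illustrative
// assumptions; the three outputs describe the resulting SparseTensor.
func exampleStringSplitV2(s *Scope) (indices, values, shape tf.Output) {
	input := Const(s.SubScope("input"), []string{"hello world", "a b c"})
	sep := Const(s.SubScope("sep"), "") // empty separator: split on whitespace
	return StringSplitV2(s, input, sep)
}
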
38469// Creates a dataset that uses a custom thread pool to compute `input_dataset`.
38470//
38471// Arguments:
38472//
38473//	thread_pool: A resource produced by the ThreadPoolHandle op.
38474//
38475//
38476func ExperimentalThreadPoolDataset(scope *Scope, input_dataset tf.Output, thread_pool tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
38477	if scope.Err() != nil {
38478		return
38479	}
38480	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
38481	opspec := tf.OpSpec{
38482		Type: "ExperimentalThreadPoolDataset",
38483		Input: []tf.Input{
38484			input_dataset, thread_pool,
38485		},
38486		Attrs: attrs,
38487	}
38488	op := scope.AddOperation(opspec)
38489	return op.Output(0)
38490}
38491
38492// Computes softsign: `features / (abs(features) + 1)`.
38493func Softsign(scope *Scope, features tf.Output) (activations tf.Output) {
38494	if scope.Err() != nil {
38495		return
38496	}
38497	opspec := tf.OpSpec{
38498		Type: "Softsign",
38499		Input: []tf.Input{
38500			features,
38501		},
38502	}
38503	op := scope.AddOperation(opspec)
38504	return op.Output(0)
38505}
38506
38507// EncodeProtoAttr is an optional argument to EncodeProto.
38508type EncodeProtoAttr func(optionalAttr)
38509
38510// EncodeProtoDescriptorSource sets the optional descriptor_source attribute to value.
38511// If not specified, defaults to "local://"
38512func EncodeProtoDescriptorSource(value string) EncodeProtoAttr {
38513	return func(m optionalAttr) {
38514		m["descriptor_source"] = value
38515	}
38516}
38517
38518// The op serializes protobuf messages provided in the input tensors.
38519//
38520// The types of the tensors in `values` must match the schema for the
38521// fields specified in `field_names`. All the tensors in `values` must
38522// have a common shape prefix, *batch_shape*.
38523//
38524// The `sizes` tensor specifies repeat counts for each field.  The repeat
38525// count (last dimension) of each tensor in `values` must be greater
38526// than or equal to the corresponding repeat count in `sizes`.
38527//
38528// A `message_type` name must be provided to give context for the field
38529// names. The actual message descriptor can be looked up either in the
38530// linked-in descriptor pool or a filename provided by the caller using
38531// the `descriptor_source` attribute.
38532//
38533// The `descriptor_source` attribute selects a source of protocol
38534// descriptors to consult when looking up `message_type`. This may be a
38535// filename containing a serialized `FileDescriptorSet` message,
38536// or the special value `local://`, in which case only descriptors linked
38537// into the code will be searched; the filename can be on any filesystem
38538// accessible to TensorFlow.
38539//
38540// You can build a `descriptor_source` file using the `--descriptor_set_out`
38541// and `--include_imports` options to the protocol compiler `protoc`.
38542//
38543// The `local://` database only covers descriptors linked into the
38544// code via C++ libraries, not Python imports. You can link in a proto descriptor
38545// by creating a cc_library target with alwayslink=1.
38546//
38547// There are a few special cases in the value mapping:
38548//
38549// Submessage and group fields must be pre-serialized as TensorFlow strings.
38550//
38551// TensorFlow lacks support for unsigned int64s, so they must be
38552// represented as `tf.int64` with the same twos-complement bit pattern
38553// (the obvious way).
38554//
38555// Unsigned int32 values can be represented exactly with `tf.int64`, or
38556// with sign wrapping if the input is of type `tf.int32`.
38557//
38558// Arguments:
38559//	sizes: Tensor of int32 with shape `[batch_shape, len(field_names)]`.
38560//	values: List of tensors containing values for the corresponding field.
38561//	field_names: List of strings containing proto field names.
38562//	message_type: Name of the proto message type to encode.
38563//
38564// Returns Tensor of serialized protos with shape `batch_shape`.
38565func EncodeProto(scope *Scope, sizes tf.Output, values []tf.Output, field_names []string, message_type string, optional ...EncodeProtoAttr) (bytes tf.Output) {
38566	if scope.Err() != nil {
38567		return
38568	}
38569	attrs := map[string]interface{}{"field_names": field_names, "message_type": message_type}
38570	for _, a := range optional {
38571		a(attrs)
38572	}
38573	opspec := tf.OpSpec{
38574		Type: "EncodeProto",
38575		Input: []tf.Input{
38576			sizes, tf.OutputList(values),
38577		},
38578		Attrs: attrs,
38579	}
38580	op := scope.AddOperation(opspec)
38581	return op.Output(0)
38582}
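
// A hedged usage sketch (editorial addition; `mypackage.MyProto` and its
// repeated int64 field `values` are hypothetical):
//
// ```
// s := NewScope()
// sizes := Const(s, [][]int32{{2}, {1}})      // repeat count per batch element
// vals := Const(s, [][]int64{{7, 8}, {9, 0}}) // trailing 0 is padding
// bytes := EncodeProto(s, sizes, []tf.Output{vals},
// 	[]string{"values"}, "mypackage.MyProto")
// ```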
38583
38584// Creates an Optional variant with no value.
38585func OptionalNone(scope *Scope) (optional tf.Output) {
38586	if scope.Err() != nil {
38587		return
38588	}
38589	opspec := tf.OpSpec{
38590		Type: "OptionalNone",
38591	}
38592	op := scope.AddOperation(opspec)
38593	return op.Output(0)
38594}
38595
38596// DecodeProtoV2Attr is an optional argument to DecodeProtoV2.
38597type DecodeProtoV2Attr func(optionalAttr)
38598
38599// DecodeProtoV2DescriptorSource sets the optional descriptor_source attribute to value.
38600//
38601// value: Either the special value `local://` or a path to a file containing
38602// a serialized `FileDescriptorSet`.
38603// If not specified, defaults to "local://"
38604func DecodeProtoV2DescriptorSource(value string) DecodeProtoV2Attr {
38605	return func(m optionalAttr) {
38606		m["descriptor_source"] = value
38607	}
38608}
38609
38610// DecodeProtoV2MessageFormat sets the optional message_format attribute to value.
38611//
38612// value: Either `binary` or `text`.
38613// If not specified, defaults to "binary"
38614func DecodeProtoV2MessageFormat(value string) DecodeProtoV2Attr {
38615	return func(m optionalAttr) {
38616		m["message_format"] = value
38617	}
38618}
38619
38620// DecodeProtoV2Sanitize sets the optional sanitize attribute to value.
38621//
38622// value: Whether to sanitize the result or not.
38623// If not specified, defaults to false
38624func DecodeProtoV2Sanitize(value bool) DecodeProtoV2Attr {
38625	return func(m optionalAttr) {
38626		m["sanitize"] = value
38627	}
38628}
38629
38630// The op extracts fields from a serialized protocol buffers message into tensors.
38631//
38632// The `decode_proto` op extracts fields from a serialized protocol buffers
38633// message into tensors.  The fields in `field_names` are decoded and converted
38634// to the corresponding `output_types` if possible.
38635//
38636// A `message_type` name must be provided to give context for the field
38637// names. The actual message descriptor can be looked up either in the
38638// linked-in descriptor pool or a filename provided by the caller using
38639// the `descriptor_source` attribute.
38640//
38641// Each output tensor is a dense tensor. This means that it is padded to
38642// hold the largest number of repeated elements seen in the input
38643// minibatch. (The shape is also padded by one to prevent zero-sized
38644// dimensions). The actual repeat counts for each example in the
38645// minibatch can be found in the `sizes` output. In many cases the output
38646// of `decode_proto` is fed immediately into tf.squeeze if missing values
38647// are not a concern. When using tf.squeeze, always pass the squeeze
38648// dimension explicitly to avoid surprises.
38649//
38650// For the most part, the mapping between Proto field types and
38651// TensorFlow dtypes is straightforward. However, there are a few
38652// special cases:
38653//
38654// - A proto field that contains a submessage or group can only be converted
38655// to `DT_STRING` (the serialized submessage). This is to reduce the
38656// complexity of the API. The resulting string can be used as input
38657// to another instance of the decode_proto op.
38658//
38659// - TensorFlow lacks support for unsigned integers. The ops represent uint64
38660// types as a `DT_INT64` with the same twos-complement bit pattern
38661// (the obvious way). Unsigned int32 values can be represented exactly by
38662// specifying type `DT_INT64`, or using twos-complement if the caller
38663// specifies `DT_INT32` in the `output_types` attribute.
38664//
38665// The `descriptor_source` attribute selects a source of protocol
38666// descriptors to consult when looking up `message_type`. This may be a
38667// filename containing a serialized `FileDescriptorSet` message,
38668// or the special value `local://`, in which case only descriptors linked
38669// into the code will be searched; the filename can be on any filesystem
38670// accessible to TensorFlow.
38671//
38672// You can build a `descriptor_source` file using the `--descriptor_set_out`
38673// and `--include_imports` options to the protocol compiler `protoc`.
38674//
38675// The `local://` database only covers descriptors linked into the
38676// code via C++ libraries, not Python imports. You can link in a proto descriptor
38677// by creating a cc_library target with alwayslink=1.
38678//
38679// Both binary and text proto serializations are supported, and can be
38680// chosen using the `message_format` attribute.
38681//
38682// Arguments:
38683//	bytes: Tensor of serialized protos with shape `batch_shape`.
38684//	message_type: Name of the proto message type to decode.
38685//	field_names: List of strings containing proto field names. An extension field can be decoded
38686// by using its full name, e.g. EXT_PACKAGE.EXT_FIELD_NAME.
38687//	output_types: List of TF types to use for the respective field in field_names.
38688//
38689// Returns:
38690//	sizes: Tensor of int32 with shape `[batch_shape, len(field_names)]`. Each entry
38691// is the number of values found for the corresponding field; optional fields may have 0 or 1 values.
38692//	values: List of tensors containing values for the corresponding field. `values[i]`
38693// has datatype `output_types[i]` and shape `[batch_shape, max(sizes[...,i])]`.
38694func DecodeProtoV2(scope *Scope, bytes tf.Output, message_type string, field_names []string, output_types []tf.DataType, optional ...DecodeProtoV2Attr) (sizes tf.Output, values []tf.Output) {
38695	if scope.Err() != nil {
38696		return
38697	}
38698	attrs := map[string]interface{}{"message_type": message_type, "field_names": field_names, "output_types": output_types}
38699	for _, a := range optional {
38700		a(attrs)
38701	}
38702	opspec := tf.OpSpec{
38703		Type: "DecodeProtoV2",
38704		Input: []tf.Input{
38705			bytes,
38706		},
38707		Attrs: attrs,
38708	}
38709	op := scope.AddOperation(opspec)
38710	if scope.Err() != nil {
38711		return
38712	}
38713	var idx int
38714	var err error
38715	sizes = op.Output(idx)
38716	if values, idx, err = makeOutputList(op, idx, "values"); err != nil {
38717		scope.UpdateErr("DecodeProtoV2", err)
38718		return
38719	}
38720	return sizes, values
38721}
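
// The inverse sketch of the EncodeProto example above (editorial addition;
// the message type and field name remain hypothetical):
//
// ```
// s := NewScope()
// serialized := Const(s, []string{ /* serialized mypackage.MyProto bytes */ })
// sizes, values := DecodeProtoV2(s, serialized, "mypackage.MyProto",
// 	[]string{"values"}, []tf.DataType{tf.Int64})
// // Row i of sizes holds the value count for batch element i; values[0] is
// // padded out to the largest count seen in the minibatch.
// ```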
38722
38723// Creates a dataset that splits a SparseTensor into elements row-wise.
38724func SparseTensorSliceDataset(scope *Scope, indices tf.Output, values tf.Output, dense_shape tf.Output) (handle tf.Output) {
38725	if scope.Err() != nil {
38726		return
38727	}
38728	opspec := tf.OpSpec{
38729		Type: "SparseTensorSliceDataset",
38730		Input: []tf.Input{
38731			indices, values, dense_shape,
38732		},
38733	}
38734	op := scope.AddOperation(opspec)
38735	return op.Output(0)
38736}
38737
38738// Returns x / y element-wise for real types.
38739//
38740// If `x` and `y` are reals, this will return the floating-point division.
38741//
38742// *NOTE*: `RealDiv` supports broadcasting. More about broadcasting
38743// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
38744func RealDiv(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
38745	if scope.Err() != nil {
38746		return
38747	}
38748	opspec := tf.OpSpec{
38749		Type: "RealDiv",
38750		Input: []tf.Input{
38751			x, y,
38752		},
38753	}
38754	op := scope.AddOperation(opspec)
38755	return op.Output(0)
38756}
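
// Broadcasting example (editorial sketch): dividing a 2x2 matrix by a
// length-2 vector broadcasts the vector across rows.
//
// ```
// s := NewScope()
// x := Const(s, [][]float32{{1, 4}, {9, 16}})
// y := Const(s, []float32{1, 2})
// z := RealDiv(s, x, y) // [[1, 2], [9, 8]]
// ```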
38757
38758// Creates a dataset that concatenates `input_dataset` with `another_dataset`.
38759func ConcatenateDataset(scope *Scope, input_dataset tf.Output, another_dataset tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
38760	if scope.Err() != nil {
38761		return
38762	}
38763	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
38764	opspec := tf.OpSpec{
38765		Type: "ConcatenateDataset",
38766		Input: []tf.Input{
38767			input_dataset, another_dataset,
38768		},
38769		Attrs: attrs,
38770	}
38771	op := scope.AddOperation(opspec)
38772	return op.Output(0)
38773}
38774
38775// Computes the grayscale dilation of 4-D `input` and 3-D `filter` tensors.
38776//
38777// The `input` tensor has shape `[batch, in_height, in_width, depth]` and the
38778// `filter` tensor has shape `[filter_height, filter_width, depth]`, i.e., each
38779// input channel is processed independently of the others with its own structuring
38780// function. The `output` tensor has shape
38781// `[batch, out_height, out_width, depth]`. The spatial dimensions of the output
38782// tensor depend on the `padding` algorithm. We currently only support the default
38783// "NHWC" `data_format`.
38784//
38785// In detail, the grayscale morphological 2-D dilation is the max-sum correlation
38786// (for consistency with `conv2d`, we use unmirrored filters):
38787//
38788//     output[b, y, x, c] =
38789//        max_{dy, dx} input[b,
38790//                           strides[1] * y + rates[1] * dy,
38791//                           strides[2] * x + rates[2] * dx,
38792//                           c] +
38793//                     filter[dy, dx, c]
38794//
38795// Max-pooling is a special case when the filter has size equal to the pooling
38796// kernel size and contains all zeros.
38797//
38798// Note on duality: The dilation of `input` by the `filter` is equal to the
38799// negation of the erosion of `-input` by the reflected `filter`.
38800//
38801// Arguments:
38802//	input: 4-D with shape `[batch, in_height, in_width, depth]`.
38803//	filter: 3-D with shape `[filter_height, filter_width, depth]`.
38804//	strides: The stride of the sliding window for each dimension of the input
38805// tensor. Must be: `[1, stride_height, stride_width, 1]`.
38806//	rates: The input stride for atrous morphological dilation. Must be:
38807// `[1, rate_height, rate_width, 1]`.
38808//	padding: The type of padding algorithm to use.
38809//
38810// Returns 4-D with shape `[batch, out_height, out_width, depth]`.
38811func Dilation2D(scope *Scope, input tf.Output, filter tf.Output, strides []int64, rates []int64, padding string) (output tf.Output) {
38812	if scope.Err() != nil {
38813		return
38814	}
38815	attrs := map[string]interface{}{"strides": strides, "rates": rates, "padding": padding}
38816	opspec := tf.OpSpec{
38817		Type: "Dilation2D",
38818		Input: []tf.Input{
38819			input, filter,
38820		},
38821		Attrs: attrs,
38822	}
38823	op := scope.AddOperation(opspec)
38824	return op.Output(0)
38825}
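
// A degenerate worked case of the max-sum formula above (editorial sketch):
// with a 1x1x1 all-zero filter, unit strides and rates, and "SAME" padding,
// each output pixel is the max over the single term input + 0, so the output
// equals the input.
//
// ```
// s := NewScope()
// input := Const(s, [][][][]float32{{{{1}, {2}}, {{3}, {4}}}}) // [1, 2, 2, 1]
// filter := Const(s, [][][]float32{{{0}}})                     // [1, 1, 1]
// out := Dilation2D(s, input, filter,
// 	[]int64{1, 1, 1, 1}, []int64{1, 1, 1, 1}, "SAME")
// ```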
38826
38827// Creates a dataset that pseudorandomly shuffles and repeats elements from
38828// `input_dataset`.
38829//
38831// Arguments:
38832//
38833//	buffer_size: The number of output elements to buffer in an iterator over
38834// this dataset. Compare with the `min_after_dequeue` attr when creating a
38835// `RandomShuffleQueue`.
38836//	seed: A scalar seed for the random number generator. If either `seed` or
38837// `seed2` is set to be non-zero, the random number generator is seeded
38838// by the given seed.  Otherwise, a random seed is used.
38839//	seed2: A second scalar seed to avoid seed collision.
38840//	count: A scalar representing the number of times the underlying dataset
38841// should be repeated. The default is `-1`, which results in infinite repetition.
38842//
38843//
38844func ShuffleAndRepeatDataset(scope *Scope, input_dataset tf.Output, buffer_size tf.Output, seed tf.Output, seed2 tf.Output, count tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
38845	if scope.Err() != nil {
38846		return
38847	}
38848	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
38849	opspec := tf.OpSpec{
38850		Type: "ShuffleAndRepeatDataset",
38851		Input: []tf.Input{
38852			input_dataset, buffer_size, seed, seed2, count,
38853		},
38854		Attrs: attrs,
38855	}
38856	op := scope.AddOperation(opspec)
38857	return op.Output(0)
38858}
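
// A hedged wiring sketch (editorial addition; `ds` stands for any upstream
// dataset handle, and the element signature is illustrative):
//
// ```
// s := NewScope()
// shuffled := ShuffleAndRepeatDataset(s, ds,
// 	Const(s, int64(1024)), // buffer_size
// 	Const(s, int64(42)),   // seed
// 	Const(s, int64(7)),    // seed2
// 	Const(s, int64(-1)),   // count: repeat indefinitely
// 	[]tf.DataType{tf.Int64}, []tf.Shape{tf.ScalarShape()})
// ```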
38859
38860// Creates a dataset that caches elements from `input_dataset`.
38861//
38862// A CacheDataset will iterate over the input_dataset, and store tensors. If the
38863// cache already exists, the cache will be used. If the cache is inappropriate
38864// (e.g. cannot be opened, or contains tensors of the wrong shape or size), an error
38865// will be returned when used.
38866//
38867// Arguments:
38868//
38869//	filename: A path on the filesystem where we should cache the dataset. Note: this
38870// will be a directory.
38871//
38872//
38873func CacheDataset(scope *Scope, input_dataset tf.Output, filename tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
38874	if scope.Err() != nil {
38875		return
38876	}
38877	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
38878	opspec := tf.OpSpec{
38879		Type: "CacheDataset",
38880		Input: []tf.Input{
38881			input_dataset, filename,
38882		},
38883		Attrs: attrs,
38884	}
38885	op := scope.AddOperation(opspec)
38886	return op.Output(0)
38887}
38888
38889// Creates a dataset that emits the records from one or more binary files.
38890//
38891// Arguments:
38892//	filenames: A scalar or a vector containing the name(s) of the file(s) to be
38893// read.
38894//	header_bytes: A scalar representing the number of bytes to skip at the
38895// beginning of a file.
38896//	record_bytes: A scalar representing the number of bytes in each record.
38897//	footer_bytes: A scalar representing the number of bytes to skip at the end
38898// of a file.
38899//	buffer_size: A scalar representing the number of bytes to buffer. Must be > 0.
38900func FixedLengthRecordDataset(scope *Scope, filenames tf.Output, header_bytes tf.Output, record_bytes tf.Output, footer_bytes tf.Output, buffer_size tf.Output) (handle tf.Output) {
38901	if scope.Err() != nil {
38902		return
38903	}
38904	opspec := tf.OpSpec{
38905		Type: "FixedLengthRecordDataset",
38906		Input: []tf.Input{
38907			filenames, header_bytes, record_bytes, footer_bytes, buffer_size,
38908		},
38909	}
38910	op := scope.AddOperation(opspec)
38911	return op.Output(0)
38912}
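
// Framing arithmetic for the arguments above (editorial note): a file holding
// n records is expected to be header_bytes + n*record_bytes + footer_bytes
// long. A hedged sketch for 16-byte records behind a 4-byte header
// (the path is hypothetical):
//
// ```
// s := NewScope()
// ds := FixedLengthRecordDataset(s,
// 	Const(s, []string{"/tmp/data.bin"}),
// 	Const(s, int64(4)),     // header_bytes
// 	Const(s, int64(16)),    // record_bytes
// 	Const(s, int64(0)),     // footer_bytes
// 	Const(s, int64(1<<20))) // buffer_size
// ```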
38913
38914// Gradients for batch normalization.
38915//
38916// DEPRECATED at GraphDef version 9: Use tf.nn.batch_normalization()
38917//
38918// This op is deprecated. See `tf.nn.batch_normalization`.
38919//
38920// Arguments:
38921//	t: A 4D input Tensor.
38922//	m: A 1D mean Tensor with size matching the last dimension of t.
38923// This is the first output from tf.nn.moments,
38924// or a saved moving average thereof.
38925//	v: A 1D variance Tensor with size matching the last dimension of t.
38926// This is the second output from tf.nn.moments,
38927// or a saved moving average thereof.
38928//	gamma: A 1D gamma Tensor with size matching the last dimension of t.
38929// If "scale_after_normalization" is true, this Tensor will be multiplied
38930// with the normalized Tensor.
38931//	backprop: 4D backprop Tensor.
38932//	variance_epsilon: A small float number to avoid dividing by 0.
38933//	scale_after_normalization: A bool indicating whether the resulting tensor
38934// needs to be multiplied by gamma.
38935//
38936// Returns the 4D backprop tensor for input, and 1D backprop tensors for mean, variance, beta, and gamma.
38937func BatchNormWithGlobalNormalizationGrad(scope *Scope, t tf.Output, m tf.Output, v tf.Output, gamma tf.Output, backprop tf.Output, variance_epsilon float32, scale_after_normalization bool) (dx tf.Output, dm tf.Output, dv tf.Output, db tf.Output, dg tf.Output) {
38938	if scope.Err() != nil {
38939		return
38940	}
38941	attrs := map[string]interface{}{"variance_epsilon": variance_epsilon, "scale_after_normalization": scale_after_normalization}
38942	opspec := tf.OpSpec{
38943		Type: "BatchNormWithGlobalNormalizationGrad",
38944		Input: []tf.Input{
38945			t, m, v, gamma, backprop,
38946		},
38947		Attrs: attrs,
38948	}
38949	op := scope.AddOperation(opspec)
38950	return op.Output(0), op.Output(1), op.Output(2), op.Output(3), op.Output(4)
38951}
38952
38953// Creates a dataset that emits the records from one or more TFRecord files.
38954//
38955// Arguments:
38956//	filenames: A scalar or vector containing the name(s) of the file(s) to be
38957// read.
38958//	compression_type: A scalar containing either (i) the empty string (no
38959// compression), (ii) "ZLIB", or (iii) "GZIP".
38960//	buffer_size: A scalar representing the number of bytes to buffer. A value of
38961// 0 means no buffering will be performed.
38962func TFRecordDataset(scope *Scope, filenames tf.Output, compression_type tf.Output, buffer_size tf.Output) (handle tf.Output) {
38963	if scope.Err() != nil {
38964		return
38965	}
38966	opspec := tf.OpSpec{
38967		Type: "TFRecordDataset",
38968		Input: []tf.Input{
38969			filenames, compression_type, buffer_size,
38970		},
38971	}
38972	op := scope.AddOperation(opspec)
38973	return op.Output(0)
38974}
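
// A hedged usage sketch (editorial addition; the filename is hypothetical):
//
// ```
// s := NewScope()
// ds := TFRecordDataset(s,
// 	Const(s, []string{"/tmp/train.tfrecord"}),
// 	Const(s, ""),            // no compression; "ZLIB" or "GZIP" also valid
// 	Const(s, int64(262144))) // 256 KiB read buffer
// ```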
38975
38976// ExperimentalStatsAggregatorHandleAttr is an optional argument to ExperimentalStatsAggregatorHandle.
38977type ExperimentalStatsAggregatorHandleAttr func(optionalAttr)
38978
38979// ExperimentalStatsAggregatorHandleContainer sets the optional container attribute to value.
38980// If not specified, defaults to ""
38981func ExperimentalStatsAggregatorHandleContainer(value string) ExperimentalStatsAggregatorHandleAttr {
38982	return func(m optionalAttr) {
38983		m["container"] = value
38984	}
38985}
38986
38987// ExperimentalStatsAggregatorHandleSharedName sets the optional shared_name attribute to value.
38988// If not specified, defaults to ""
38989func ExperimentalStatsAggregatorHandleSharedName(value string) ExperimentalStatsAggregatorHandleAttr {
38990	return func(m optionalAttr) {
38991		m["shared_name"] = value
38992	}
38993}
38994
38995// Creates a statistics manager resource.
38996func ExperimentalStatsAggregatorHandle(scope *Scope, optional ...ExperimentalStatsAggregatorHandleAttr) (handle tf.Output) {
38997	if scope.Err() != nil {
38998		return
38999	}
39000	attrs := map[string]interface{}{}
39001	for _, a := range optional {
39002		a(attrs)
39003	}
39004	opspec := tf.OpSpec{
39005		Type: "ExperimentalStatsAggregatorHandle",
39006
39007		Attrs: attrs,
39008	}
39009	op := scope.AddOperation(opspec)
39010	return op.Output(0)
39011}
39012
39013// A container for an iterator resource.
39014//
39015// Returns A handle to the iterator that can be passed to a "MakeIterator" or
39016// "IteratorGetNext" op. In contrast to Iterator, AnonymousIterator prevents
39017// resource sharing by name, and does not keep a reference to the resource
39018// container.
39019func AnonymousIterator(scope *Scope, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
39020	if scope.Err() != nil {
39021		return
39022	}
39023	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
39024	opspec := tf.OpSpec{
39025		Type: "AnonymousIterator",
39026
39027		Attrs: attrs,
39028	}
39029	op := scope.AddOperation(opspec)
39030	return op.Output(0)
39031}
39032
39033// Adjust the contrast of one or more images.
39034//
39035// `images` is a tensor of at least 3 dimensions.  The last 3 dimensions are
39036// interpreted as `[height, width, channels]`.  The other dimensions only
39037// represent a collection of images, such as `[batch, height, width, channels]`.
39038//
39039// Contrast is adjusted independently for each channel of each image.
39040//
39041// For each channel, the Op first computes the mean of the image pixels in the
39042// channel and then adjusts each component of each pixel to
39043// `(x - mean) * contrast_factor + mean`.
39044//
39045// Arguments:
39046//	images: Images to adjust.  At least 3-D.
39047//	contrast_factor: A float multiplier for adjusting contrast.
39048//
39049// Returns The contrast-adjusted image or images.
39050func AdjustContrastv2(scope *Scope, images tf.Output, contrast_factor tf.Output) (output tf.Output) {
39051	if scope.Err() != nil {
39052		return
39053	}
39054	opspec := tf.OpSpec{
39055		Type: "AdjustContrastv2",
39056		Input: []tf.Input{
39057			images, contrast_factor,
39058		},
39059	}
39060	op := scope.AddOperation(opspec)
39061	return op.Output(0)
39062}
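
// A worked instance of the formula above (editorial sketch): for a channel
// holding pixels [1, 3] the mean is 2, so contrast_factor = 2 maps them to
// (1-2)*2+2 = 0 and (3-2)*2+2 = 4.
//
// ```
// s := NewScope()
// img := Const(s, [][][]float32{{{1}, {3}}}) // [height=1, width=2, channels=1]
// out := AdjustContrastv2(s, img, Const(s, float32(2))) // [[[0], [4]]]
// ```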
39063
39064// Gets the next output from the given iterator.
39065func IteratorGetNext(scope *Scope, iterator tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (components []tf.Output) {
39066	if scope.Err() != nil {
39067		return
39068	}
39069	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
39070	opspec := tf.OpSpec{
39071		Type: "IteratorGetNext",
39072		Input: []tf.Input{
39073			iterator,
39074		},
39075		Attrs: attrs,
39076	}
39077	op := scope.AddOperation(opspec)
39078	if scope.Err() != nil {
39079		return
39080	}
39081	var idx int
39082	var err error
39083	if components, idx, err = makeOutputList(op, idx, "components"); err != nil {
39084		scope.UpdateErr("IteratorGetNext", err)
39085		return
39086	}
39087	return components
39088}
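
// How the iterator ops fit together (editorial sketch; binding a dataset to
// the iterator via a MakeIterator op is assumed, and the element signature
// is illustrative):
//
// ```
// s := NewScope()
// types := []tf.DataType{tf.Int64}
// shapes := []tf.Shape{tf.ScalarShape()}
// it := AnonymousIterator(s, types, shapes)
// // ... a MakeIterator op would bind a dataset to `it` here ...
// next := IteratorGetNext(s, it, types, shapes) // one tf.Output per component
// ```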
39089
39090// Outputs the single element from the given dataset.
39091//
39092// Arguments:
39093//	dataset: A handle to a dataset that contains a single element.
39094//
39095//
39096//
39097// Returns The components of the single element of `dataset`.
39098func DatasetToSingleElement(scope *Scope, dataset tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (components []tf.Output) {
39099	if scope.Err() != nil {
39100		return
39101	}
39102	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
39103	opspec := tf.OpSpec{
39104		Type: "DatasetToSingleElement",
39105		Input: []tf.Input{
39106			dataset,
39107		},
39108		Attrs: attrs,
39109	}
39110	op := scope.AddOperation(opspec)
39111	if scope.Err() != nil {
39112		return
39113	}
39114	var idx int
39115	var err error
39116	if components, idx, err = makeOutputList(op, idx, "components"); err != nil {
39117		scope.UpdateErr("DatasetToSingleElement", err)
39118		return
39119	}
39120	return components
39121}
39122
39123// Converts the given `resource_handle` representing an iterator to a string.
39124//
39125// Arguments:
39126//	resource_handle: A handle to an iterator resource.
39127//
39128// Returns A string representation of the given handle.
39129func IteratorToStringHandle(scope *Scope, resource_handle tf.Output) (string_handle tf.Output) {
39130	if scope.Err() != nil {
39131		return
39132	}
39133	opspec := tf.OpSpec{
39134		Type: "IteratorToStringHandle",
39135		Input: []tf.Input{
39136			resource_handle,
39137		},
39138	}
39139	op := scope.AddOperation(opspec)
39140	return op.Output(0)
39141}
39142
39143// IteratorFromStringHandleAttr is an optional argument to IteratorFromStringHandle.
39144type IteratorFromStringHandleAttr func(optionalAttr)
39145
39146// IteratorFromStringHandleOutputTypes sets the optional output_types attribute to value.
39147//
39148// value: If specified, defines the type of each tuple component in an
39149// element produced by the resulting iterator.
39150// If not specified, defaults to an empty list.
39151//
39152// REQUIRES: len(value) >= 0
39153func IteratorFromStringHandleOutputTypes(value []tf.DataType) IteratorFromStringHandleAttr {
39154	return func(m optionalAttr) {
39155		m["output_types"] = value
39156	}
39157}
39158
39159// IteratorFromStringHandleOutputShapes sets the optional output_shapes attribute to value.
39160//
39161// value: If specified, defines the shape of each tuple component in an
39162// element produced by the resulting iterator.
39163// If not specified, defaults to an empty list.
39164//
39165// REQUIRES: len(value) >= 0
39166func IteratorFromStringHandleOutputShapes(value []tf.Shape) IteratorFromStringHandleAttr {
39167	return func(m optionalAttr) {
39168		m["output_shapes"] = value
39169	}
39170}
39171
39172// Converts the given string representing a handle to an iterator to a resource.
39173//
39174// Arguments:
39175//	string_handle: A string representation of the given handle.
39176//
39177// Returns A handle to an iterator resource.
39178func IteratorFromStringHandle(scope *Scope, string_handle tf.Output, optional ...IteratorFromStringHandleAttr) (resource_handle tf.Output) {
39179	if scope.Err() != nil {
39180		return
39181	}
39182	attrs := map[string]interface{}{}
39183	for _, a := range optional {
39184		a(attrs)
39185	}
39186	opspec := tf.OpSpec{
39187		Type: "IteratorFromStringHandle",
39188		Input: []tf.Input{
39189			string_handle,
39190		},
39191		Attrs: attrs,
39192	}
39193	op := scope.AddOperation(opspec)
39194	return op.Output(0)
39195}
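
// Round-trip sketch with IteratorToStringHandle above (editorial addition):
//
// ```
// s := NewScope()
// it := AnonymousIterator(s, []tf.DataType{tf.Int64}, []tf.Shape{tf.ScalarShape()})
// handleStr := IteratorToStringHandle(s, it)
// restored := IteratorFromStringHandle(s, handleStr,
// 	IteratorFromStringHandleOutputTypes([]tf.DataType{tf.Int64}))
// ```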
39196